/*	$NetBSD: if_wm.c,v 1.762 2022/08/12 10:58:45 riastradh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.762 2022/08/12 10:58:45 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
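
/*
 * Usage sketch (illustrative, not from the original source): the last
 * argument of DPRINTF() is a complete, parenthesized printf() argument
 * list, e.g.
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: LINK: status changed\n", device_xname(sc->sc_dev)));
 *
 * so the arguments are only evaluated when the WM_DEBUG_LINK bit is set
 * in sc->sc_debug, and the whole call compiles away without WM_DEBUG.
 */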

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts that this device driver supports.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
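
/*
 * Illustrative note (not from the original source): because the ring
 * and job-queue sizes are powers of two, WM_NEXTTX()/WM_NEXTTXS() wrap
 * the index with a cheap AND instead of a modulo.  For example, with
 * WM_NTXDESC(txq) == 4096 (mask 0xfff):
 *
 *	WM_NEXTTX(txq, 100)  == 101
 *	WM_NEXTTX(txq, 4095) == (4096 & 0xfff) == 0
 */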

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
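/*
 * Worked example (illustrative, not from the original source): a ~9k
 * jumbo frame needs ceil(9014 / 2048) == 5 MCLBYTES buffers, and
 * 256 descriptors / 5 buffers per packet ~= 51, hence the "room for
 * 50 jumbo packets" figure above.
 */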
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
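
/*
 * Expansion sketch (illustrative, not from the original source):
 * WM_Q_EVCNT_DEFINE(txq, tso) declares
 *
 *	char txq_tso_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_tso;
 *
 * in the enclosing struct, and WM_Q_EVCNT_ATTACH(txq, tso, q, 0, xname,
 * EVCNT_TYPE_MISC) formats the counter name as "txq00tso" and registers
 * it with evcnt_attach_dynamic(9).
 */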

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This pcq mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * On NEWQUEUE devices, the Tx hardware queue's busy state must be
	 * managed with txq->txq_flags rather than ifp->if_flags.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
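
/*
 * Illustrative note (not from the original source): rxq_tailp always
 * points at the pointer slot where the next fragment will be linked:
 * at rxq_head after WM_RXCHAIN_RESET(), then at m->m_next after each
 * WM_RXCHAIN_LINK(rxq, m).  Appending to the chain is therefore O(1),
 * with no walk of the mbuf list.
 */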

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
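
/*
 * Note (illustrative, an assumption rather than original text): the
 * relaxed load/store pair above is not an atomic read-modify-write; it
 * only keeps readers such as vmstat(1) from observing a torn 64-bit
 * count.  Increments are presumably serialized by the per-queue or core
 * locks, so a lost update is not a concern here.
 */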

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
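
/*
 * Worked example (illustrative, not from the original source): a
 * descriptor ring base is programmed as two 32-bit halves.  For a ring
 * at bus address 0x123456000:
 *
 *	WM_CDTXADDR_LO() == 0x23456000	(low 32 bits, e.g. for TDBAL)
 *	WM_CDTXADDR_HI() == 0x00000001	(high 32 bits, e.g. for TDBAH)
 *
 * On configurations where bus_addr_t is 32 bits wide, the high half is
 * always 0.
 */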

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },
   1303 
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1305 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1306 	  WM_T_82547,		WMP_F_COPPER },
   1307 
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1309 	  "Intel i82547GI 1000BASE-T Ethernet",
   1310 	  WM_T_82547_2,		WMP_F_COPPER },
   1311 
   1312 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1313 	  "Intel PRO/1000 PT (82571EB)",
   1314 	  WM_T_82571,		WMP_F_COPPER },
   1315 
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1317 	  "Intel PRO/1000 PF (82571EB)",
   1318 	  WM_T_82571,		WMP_F_FIBER },
   1319 
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1321 	  "Intel PRO/1000 PB (82571EB)",
   1322 	  WM_T_82571,		WMP_F_SERDES },
   1323 
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1325 	  "Intel PRO/1000 QT (82571EB)",
   1326 	  WM_T_82571,		WMP_F_COPPER },
   1327 
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1329 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1330 	  WM_T_82571,		WMP_F_COPPER },
   1331 
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1333 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1334 	  WM_T_82571,		WMP_F_COPPER },
   1335 
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1337 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1338 	  WM_T_82571,		WMP_F_SERDES },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1341 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1342 	  WM_T_82571,		WMP_F_SERDES },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1345 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1346 	  WM_T_82571,		WMP_F_FIBER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1349 	  "Intel i82572EI 1000baseT Ethernet",
   1350 	  WM_T_82572,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1353 	  "Intel i82572EI 1000baseX Ethernet",
   1354 	  WM_T_82572,		WMP_F_FIBER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1357 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1358 	  WM_T_82572,		WMP_F_SERDES },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1361 	  "Intel i82572EI 1000baseT Ethernet",
   1362 	  WM_T_82572,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1365 	  "Intel i82573E",
   1366 	  WM_T_82573,		WMP_F_COPPER },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1369 	  "Intel i82573E IAMT",
   1370 	  WM_T_82573,		WMP_F_COPPER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1373 	  "Intel i82573L Gigabit Ethernet",
   1374 	  WM_T_82573,		WMP_F_COPPER },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1377 	  "Intel i82574L",
   1378 	  WM_T_82574,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1381 	  "Intel i82574L",
   1382 	  WM_T_82574,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1385 	  "Intel i82583V",
   1386 	  WM_T_82583,		WMP_F_COPPER },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1389 	  "i80003 dual 1000baseT Ethernet",
   1390 	  WM_T_80003,		WMP_F_COPPER },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1393 	  "i80003 dual 1000baseX Ethernet",
   1394 	  WM_T_80003,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1397 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1398 	  WM_T_80003,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1401 	  "Intel i80003 1000baseT Ethernet",
   1402 	  WM_T_80003,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1405 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1406 	  WM_T_80003,		WMP_F_SERDES },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1409 	  "Intel i82801H (M_AMT) LAN Controller",
   1410 	  WM_T_ICH8,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1412 	  "Intel i82801H (AMT) LAN Controller",
   1413 	  WM_T_ICH8,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1415 	  "Intel i82801H LAN Controller",
   1416 	  WM_T_ICH8,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1418 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1419 	  WM_T_ICH8,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1421 	  "Intel i82801H (M) LAN Controller",
   1422 	  WM_T_ICH8,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1424 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1425 	  WM_T_ICH8,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1427 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1428 	  WM_T_ICH8,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1430 	  "82567V-3 LAN Controller",
   1431 	  WM_T_ICH8,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1433 	  "82801I (AMT) LAN Controller",
   1434 	  WM_T_ICH9,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1436 	  "82801I 10/100 LAN Controller",
   1437 	  WM_T_ICH9,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1439 	  "82801I (G) 10/100 LAN Controller",
   1440 	  WM_T_ICH9,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1442 	  "82801I (GT) 10/100 LAN Controller",
   1443 	  WM_T_ICH9,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1445 	  "82801I (C) LAN Controller",
   1446 	  WM_T_ICH9,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1448 	  "82801I mobile LAN Controller",
   1449 	  WM_T_ICH9,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1451 	  "82801I mobile (V) LAN Controller",
   1452 	  WM_T_ICH9,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1454 	  "82801I mobile (AMT) LAN Controller",
   1455 	  WM_T_ICH9,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1457 	  "82567LM-4 LAN Controller",
   1458 	  WM_T_ICH9,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1460 	  "82567LM-2 LAN Controller",
   1461 	  WM_T_ICH10,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1463 	  "82567LF-2 LAN Controller",
   1464 	  WM_T_ICH10,		WMP_F_COPPER },
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1466 	  "82567LM-3 LAN Controller",
   1467 	  WM_T_ICH10,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1469 	  "82567LF-3 LAN Controller",
   1470 	  WM_T_ICH10,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1472 	  "82567V-2 LAN Controller",
   1473 	  WM_T_ICH10,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1475 	  "82567V-3? LAN Controller",
   1476 	  WM_T_ICH10,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1478 	  "HANKSVILLE LAN Controller",
   1479 	  WM_T_ICH10,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1481 	  "PCH LAN (82577LM) Controller",
   1482 	  WM_T_PCH,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1484 	  "PCH LAN (82577LC) Controller",
   1485 	  WM_T_PCH,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1487 	  "PCH LAN (82578DM) Controller",
   1488 	  WM_T_PCH,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1490 	  "PCH LAN (82578DC) Controller",
   1491 	  WM_T_PCH,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1493 	  "PCH2 LAN (82579LM) Controller",
   1494 	  WM_T_PCH2,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1496 	  "PCH2 LAN (82579V) Controller",
   1497 	  WM_T_PCH2,		WMP_F_COPPER },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1499 	  "82575EB dual-1000baseT Ethernet",
   1500 	  WM_T_82575,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1502 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1503 	  WM_T_82575,		WMP_F_SERDES },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1505 	  "82575GB quad-1000baseT Ethernet",
   1506 	  WM_T_82575,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1508 	  "82575GB quad-1000baseT Ethernet (PM)",
   1509 	  WM_T_82575,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1511 	  "82576 1000BaseT Ethernet",
   1512 	  WM_T_82576,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1514 	  "82576 1000BaseX Ethernet",
   1515 	  WM_T_82576,		WMP_F_FIBER },
   1516 
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1518 	  "82576 gigabit Ethernet (SERDES)",
   1519 	  WM_T_82576,		WMP_F_SERDES },
   1520 
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1522 	  "82576 quad-1000BaseT Ethernet",
   1523 	  WM_T_82576,		WMP_F_COPPER },
   1524 
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1526 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1527 	  WM_T_82576,		WMP_F_COPPER },
   1528 
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1530 	  "82576 gigabit Ethernet",
   1531 	  WM_T_82576,		WMP_F_COPPER },
   1532 
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1534 	  "82576 gigabit Ethernet (SERDES)",
   1535 	  WM_T_82576,		WMP_F_SERDES },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1537 	  "82576 quad-gigabit Ethernet (SERDES)",
   1538 	  WM_T_82576,		WMP_F_SERDES },
   1539 
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1541 	  "82580 1000BaseT Ethernet",
   1542 	  WM_T_82580,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1544 	  "82580 1000BaseX Ethernet",
   1545 	  WM_T_82580,		WMP_F_FIBER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1548 	  "82580 1000BaseT Ethernet (SERDES)",
   1549 	  WM_T_82580,		WMP_F_SERDES },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1552 	  "82580 gigabit Ethernet (SGMII)",
   1553 	  WM_T_82580,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1555 	  "82580 dual-1000BaseT Ethernet",
   1556 	  WM_T_82580,		WMP_F_COPPER },
   1557 
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1559 	  "82580 quad-1000BaseX Ethernet",
   1560 	  WM_T_82580,		WMP_F_FIBER },
   1561 
   1562 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1563 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1564 	  WM_T_82580,		WMP_F_COPPER },
   1565 
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1567 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1568 	  WM_T_82580,		WMP_F_SERDES },
   1569 
   1570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1571 	  "DH89XXCC 1000BASE-KX Ethernet",
   1572 	  WM_T_82580,		WMP_F_SERDES },
   1573 
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1575 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1576 	  WM_T_82580,		WMP_F_SERDES },
   1577 
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1579 	  "I350 Gigabit Network Connection",
   1580 	  WM_T_I350,		WMP_F_COPPER },
   1581 
   1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1583 	  "I350 Gigabit Fiber Network Connection",
   1584 	  WM_T_I350,		WMP_F_FIBER },
   1585 
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1587 	  "I350 Gigabit Backplane Connection",
   1588 	  WM_T_I350,		WMP_F_SERDES },
   1589 
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1591 	  "I350 Quad Port Gigabit Ethernet",
   1592 	  WM_T_I350,		WMP_F_SERDES },
   1593 
   1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1595 	  "I350 Gigabit Connection",
   1596 	  WM_T_I350,		WMP_F_COPPER },
   1597 
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1599 	  "I354 Gigabit Ethernet (KX)",
   1600 	  WM_T_I354,		WMP_F_SERDES },
   1601 
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1603 	  "I354 Gigabit Ethernet (SGMII)",
   1604 	  WM_T_I354,		WMP_F_COPPER },
   1605 
   1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1607 	  "I354 Gigabit Ethernet (2.5G)",
   1608 	  WM_T_I354,		WMP_F_COPPER },
   1609 
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1611 	  "I210-T1 Ethernet Server Adapter",
   1612 	  WM_T_I210,		WMP_F_COPPER },
   1613 
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1615 	  "I210 Ethernet (Copper OEM)",
   1616 	  WM_T_I210,		WMP_F_COPPER },
   1617 
   1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1619 	  "I210 Ethernet (Copper IT)",
   1620 	  WM_T_I210,		WMP_F_COPPER },
   1621 
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1623 	  "I210 Ethernet (Copper, FLASH less)",
   1624 	  WM_T_I210,		WMP_F_COPPER },
   1625 
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1627 	  "I210 Gigabit Ethernet (Fiber)",
   1628 	  WM_T_I210,		WMP_F_FIBER },
   1629 
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1631 	  "I210 Gigabit Ethernet (SERDES)",
   1632 	  WM_T_I210,		WMP_F_SERDES },
   1633 
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1635 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1636 	  WM_T_I210,		WMP_F_SERDES },
   1637 
   1638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1639 	  "I210 Gigabit Ethernet (SGMII)",
   1640 	  WM_T_I210,		WMP_F_COPPER },
   1641 
   1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1643 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1644 	  WM_T_I210,		WMP_F_COPPER },
   1645 
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1647 	  "I211 Ethernet (COPPER)",
   1648 	  WM_T_I211,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1650 	  "I217 V Ethernet Connection",
   1651 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1653 	  "I217 LM Ethernet Connection",
   1654 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1656 	  "I218 V Ethernet Connection",
   1657 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1659 	  "I218 V Ethernet Connection",
   1660 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1662 	  "I218 V Ethernet Connection",
   1663 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1665 	  "I218 LM Ethernet Connection",
   1666 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1668 	  "I218 LM Ethernet Connection",
   1669 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1671 	  "I218 LM Ethernet Connection",
   1672 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1674 	  "I219 LM Ethernet Connection",
   1675 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1677 	  "I219 LM (2) Ethernet Connection",
   1678 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1680 	  "I219 LM (3) Ethernet Connection",
   1681 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1683 	  "I219 LM (4) Ethernet Connection",
   1684 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1686 	  "I219 LM (5) Ethernet Connection",
   1687 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1689 	  "I219 LM (6) Ethernet Connection",
   1690 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1692 	  "I219 LM (7) Ethernet Connection",
   1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1695 	  "I219 LM (8) Ethernet Connection",
   1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1698 	  "I219 LM (9) Ethernet Connection",
   1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1701 	  "I219 LM (10) Ethernet Connection",
   1702 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1704 	  "I219 LM (11) Ethernet Connection",
   1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1707 	  "I219 LM (12) Ethernet Connection",
   1708 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1710 	  "I219 LM (13) Ethernet Connection",
   1711 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1713 	  "I219 LM (14) Ethernet Connection",
   1714 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1715 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1716 	  "I219 LM (15) Ethernet Connection",
   1717 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1719 	  "I219 LM (16) Ethernet Connection",
   1720 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1722 	  "I219 LM (17) Ethernet Connection",
   1723 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1725 	  "I219 LM (18) Ethernet Connection",
   1726 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1727 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1728 	  "I219 LM (19) Ethernet Connection",
   1729 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1731 	  "I219 V Ethernet Connection",
   1732 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1734 	  "I219 V (2) Ethernet Connection",
   1735 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1737 	  "I219 V (4) Ethernet Connection",
   1738 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1739 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1740 	  "I219 V (5) Ethernet Connection",
   1741 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1743 	  "I219 V (6) Ethernet Connection",
   1744 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1746 	  "I219 V (7) Ethernet Connection",
   1747 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1749 	  "I219 V (8) Ethernet Connection",
   1750 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1751 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1752 	  "I219 V (9) Ethernet Connection",
   1753 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1755 	  "I219 V (10) Ethernet Connection",
   1756 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1758 	  "I219 V (11) Ethernet Connection",
   1759 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1761 	  "I219 V (12) Ethernet Connection",
   1762 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1763 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1764 	  "I219 V (13) Ethernet Connection",
   1765 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1766 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1767 	  "I219 V (14) Ethernet Connection",
   1768 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1770 	  "I219 V (15) Ethernet Connection",
   1771 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1772 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1773 	  "I219 V (16) Ethernet Connection",
   1774 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1775 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1776 	  "I219 V (17) Ethernet Connection",
   1777 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1779 	  "I219 V (18) Ethernet Connection",
   1780 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1782 	  "I219 V (19) Ethernet Connection",
   1783 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1784 	{ 0,			0,
   1785 	  NULL,
   1786 	  0,			0 },
   1787 };
   1788 
   1789 /*
   1790  * Register read/write functions.
   1791  * Other than CSR_{READ|WRITE}().
   1792  */
   1793 
   1794 #if 0 /* Not currently used */
   1795 static inline uint32_t
   1796 wm_io_read(struct wm_softc *sc, int reg)
   1797 {
   1798 
   1799 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1800 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1801 }
   1802 #endif
   1803 
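         /*
          * Indirect I/O register access: write the register offset to the
          * address window at I/O-space offset 0, then access the data window
          * at offset 4.  This is only used to work around bugs in some chip
          * versions; normal access goes through CSR_READ()/CSR_WRITE().
          */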
   1804 static inline void
   1805 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1806 {
   1807 
   1808 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1809 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1810 }
   1811 
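         /*
          * Write an 8-bit value to an indirect 82575 controller register: the
          * data and the target offset are packed into a single write to "reg",
          * after which we poll for the ready bit.
          */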
   1812 static inline void
   1813 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1814     uint32_t data)
   1815 {
   1816 	uint32_t regval;
   1817 	int i;
   1818 
   1819 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1820 
   1821 	CSR_WRITE(sc, reg, regval);
   1822 
   1823 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1824 		delay(5);
   1825 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1826 			break;
   1827 	}
   1828 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1829 		aprint_error("%s: WARNING:"
   1830 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1831 		    device_xname(sc->sc_dev), reg);
   1832 	}
   1833 }
   1834 
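         /*
          * Load a 64-bit bus address into a descriptor address field as two
          * little-endian 32-bit halves.
          */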
   1835 static inline void
   1836 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1837 {
   1838 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1839 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1840 }
   1841 
   1842 /*
   1843  * Descriptor sync/init functions.
   1844  */
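         /*
          * Sync "num" Tx descriptors starting at "start".  If the range wraps
          * past the end of the ring, the sync is split into two pieces.
          */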
   1845 static inline void
   1846 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1847 {
   1848 	struct wm_softc *sc = txq->txq_sc;
   1849 
   1850 	/* If it will wrap around, sync to the end of the ring. */
   1851 	if ((start + num) > WM_NTXDESC(txq)) {
   1852 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1853 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1854 		    (WM_NTXDESC(txq) - start), ops);
   1855 		num -= (WM_NTXDESC(txq) - start);
   1856 		start = 0;
   1857 	}
   1858 
   1859 	/* Now sync whatever is left. */
   1860 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1861 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1862 }
   1863 
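         /* Sync a single Rx descriptor at index "start". */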
   1864 static inline void
   1865 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1866 {
   1867 	struct wm_softc *sc = rxq->rxq_sc;
   1868 
   1869 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1870 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1871 }
   1872 
   1873 static inline void
   1874 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1875 {
   1876 	struct wm_softc *sc = rxq->rxq_sc;
   1877 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1878 	struct mbuf *m = rxs->rxs_mbuf;
   1879 
   1880 	/*
   1881 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1882 	 * so that the payload after the Ethernet header is aligned
   1883 	 * to a 4-byte boundary.
    1884 	 *
   1885 	 * XXX BRAINDAMAGE ALERT!
   1886 	 * The stupid chip uses the same size for every buffer, which
   1887 	 * is set in the Receive Control register.  We are using the 2K
   1888 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1889 	 * reason, we can't "scoot" packets longer than the standard
   1890 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1891 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1892 	 * the upper layer copy the headers.
   1893 	 */
   1894 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1895 
   1896 	if (sc->sc_type == WM_T_82574) {
   1897 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1898 		rxd->erx_data.erxd_addr =
   1899 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1900 		rxd->erx_data.erxd_dd = 0;
   1901 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1902 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1903 
   1904 		rxd->nqrx_data.nrxd_paddr =
   1905 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1906 		/* Currently, split header is not supported. */
   1907 		rxd->nqrx_data.nrxd_haddr = 0;
   1908 	} else {
   1909 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1910 
   1911 		wm_set_dma_addr(&rxd->wrx_addr,
   1912 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1913 		rxd->wrx_len = 0;
   1914 		rxd->wrx_cksum = 0;
   1915 		rxd->wrx_status = 0;
   1916 		rxd->wrx_errors = 0;
   1917 		rxd->wrx_special = 0;
   1918 	}
   1919 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1920 
   1921 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1922 }
   1923 
   1924 /*
   1925  * Device driver interface functions and commonly used functions.
   1926  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1927  */
   1928 
    1929 /* Look up the device in the supported device table */
   1930 static const struct wm_product *
   1931 wm_lookup(const struct pci_attach_args *pa)
   1932 {
   1933 	const struct wm_product *wmp;
   1934 
   1935 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1936 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1937 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1938 			return wmp;
   1939 	}
   1940 	return NULL;
   1941 }
   1942 
   1943 /* The match function (ca_match) */
   1944 static int
   1945 wm_match(device_t parent, cfdata_t cf, void *aux)
   1946 {
   1947 	struct pci_attach_args *pa = aux;
   1948 
   1949 	if (wm_lookup(pa) != NULL)
   1950 		return 1;
   1951 
   1952 	return 0;
   1953 }
   1954 
   1955 /* The attach function (ca_attach) */
   1956 static void
   1957 wm_attach(device_t parent, device_t self, void *aux)
   1958 {
   1959 	struct wm_softc *sc = device_private(self);
   1960 	struct pci_attach_args *pa = aux;
   1961 	prop_dictionary_t dict;
   1962 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1963 	pci_chipset_tag_t pc = pa->pa_pc;
   1964 	int counts[PCI_INTR_TYPE_SIZE];
   1965 	pci_intr_type_t max_type;
   1966 	const char *eetype, *xname;
   1967 	bus_space_tag_t memt;
   1968 	bus_space_handle_t memh;
   1969 	bus_size_t memsize;
   1970 	int memh_valid;
   1971 	int i, error;
   1972 	const struct wm_product *wmp;
   1973 	prop_data_t ea;
   1974 	prop_number_t pn;
   1975 	uint8_t enaddr[ETHER_ADDR_LEN];
   1976 	char buf[256];
   1977 	char wqname[MAXCOMLEN];
   1978 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1979 	pcireg_t preg, memtype;
   1980 	uint16_t eeprom_data, apme_mask;
   1981 	bool force_clear_smbi;
   1982 	uint32_t link_mode;
   1983 	uint32_t reg;
   1984 
   1985 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1986 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1987 #endif
   1988 	sc->sc_dev = self;
   1989 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1990 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1991 	sc->sc_core_stopping = false;
   1992 
   1993 	wmp = wm_lookup(pa);
   1994 #ifdef DIAGNOSTIC
   1995 	if (wmp == NULL) {
   1996 		printf("\n");
   1997 		panic("wm_attach: impossible");
   1998 	}
   1999 #endif
   2000 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   2001 
   2002 	sc->sc_pc = pa->pa_pc;
   2003 	sc->sc_pcitag = pa->pa_tag;
   2004 
   2005 	if (pci_dma64_available(pa)) {
   2006 		aprint_verbose(", 64-bit DMA");
   2007 		sc->sc_dmat = pa->pa_dmat64;
   2008 	} else {
   2009 		aprint_verbose(", 32-bit DMA");
   2010 		sc->sc_dmat = pa->pa_dmat;
   2011 	}
   2012 
   2013 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2014 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2015 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2016 
   2017 	sc->sc_type = wmp->wmp_type;
   2018 
   2019 	/* Set default function pointers */
   2020 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2021 	sc->phy.release = sc->nvm.release = wm_put_null;
   2022 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2023 
   2024 	if (sc->sc_type < WM_T_82543) {
   2025 		if (sc->sc_rev < 2) {
   2026 			aprint_error_dev(sc->sc_dev,
   2027 			    "i82542 must be at least rev. 2\n");
   2028 			return;
   2029 		}
   2030 		if (sc->sc_rev < 3)
   2031 			sc->sc_type = WM_T_82542_2_0;
   2032 	}
   2033 
   2034 	/*
   2035 	 * Disable MSI for Errata:
   2036 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2037 	 *
   2038 	 *  82544: Errata 25
   2039 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2040 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2041 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2042 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2043 	 *
   2044 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2045 	 *
   2046 	 *  82571 & 82572: Errata 63
   2047 	 */
   2048 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2049 	    || (sc->sc_type == WM_T_82572))
   2050 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2051 
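         	/* These chips use the new-style queue registers and descriptors */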
   2052 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2053 	    || (sc->sc_type == WM_T_82580)
   2054 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2055 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2056 		sc->sc_flags |= WM_F_NEWQUEUE;
   2057 
   2058 	/* Set device properties (mactype) */
   2059 	dict = device_properties(sc->sc_dev);
   2060 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2061 
   2062 	/*
    2063 	 * Map the device.  All devices support memory-mapped access,
   2064 	 * and it is really required for normal operation.
   2065 	 */
   2066 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2067 	switch (memtype) {
   2068 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2069 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2070 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2071 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2072 		break;
   2073 	default:
   2074 		memh_valid = 0;
   2075 		break;
   2076 	}
   2077 
   2078 	if (memh_valid) {
   2079 		sc->sc_st = memt;
   2080 		sc->sc_sh = memh;
   2081 		sc->sc_ss = memsize;
   2082 	} else {
   2083 		aprint_error_dev(sc->sc_dev,
   2084 		    "unable to map device registers\n");
   2085 		return;
   2086 	}
   2087 
   2088 	/*
   2089 	 * In addition, i82544 and later support I/O mapped indirect
   2090 	 * register access.  It is not desirable (nor supported in
   2091 	 * this driver) to use it for normal operation, though it is
   2092 	 * required to work around bugs in some chip versions.
   2093 	 */
   2094 	switch (sc->sc_type) {
   2095 	case WM_T_82544:
   2096 	case WM_T_82541:
   2097 	case WM_T_82541_2:
   2098 	case WM_T_82547:
   2099 	case WM_T_82547_2:
   2100 		/* First we have to find the I/O BAR. */
   2101 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2102 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2103 			if (memtype == PCI_MAPREG_TYPE_IO)
   2104 				break;
   2105 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2106 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2107 				i += 4;	/* skip high bits, too */
   2108 		}
   2109 		if (i < PCI_MAPREG_END) {
   2110 			/*
    2111 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2112 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2113 			 * That's not a problem because newer chips don't
    2114 			 * have this bug.
    2115 			 *
    2116 			 * The i8254x apparently doesn't respond when the
    2117 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2118 			 * been configured.
   2119 			 */
   2120 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2121 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2122 				aprint_error_dev(sc->sc_dev,
   2123 				    "WARNING: I/O BAR at zero.\n");
   2124 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2125 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2126 			    == 0) {
   2127 				sc->sc_flags |= WM_F_IOH_VALID;
   2128 			} else
   2129 				aprint_error_dev(sc->sc_dev,
   2130 				    "WARNING: unable to map I/O space\n");
   2131 		}
   2132 		break;
   2133 	default:
   2134 		break;
   2135 	}
   2136 
   2137 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2138 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2139 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2140 	if (sc->sc_type < WM_T_82542_2_1)
   2141 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2142 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2143 
   2144 	/* Power up chip */
   2145 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2146 	    && error != EOPNOTSUPP) {
   2147 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2148 		return;
   2149 	}
   2150 
   2151 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2152 	/*
    2153 	 * If we can use only one queue, don't use MSI-X, to save
    2154 	 * interrupt resources.
   2155 	 */
   2156 	if (sc->sc_nqueues > 1) {
   2157 		max_type = PCI_INTR_TYPE_MSIX;
   2158 		/*
    2159 		 * The 82583 has an MSI-X capability in the PCI configuration
    2160 		 * space, but it doesn't actually support it. At least the
    2161 		 * documentation doesn't say anything about MSI-X.
   2162 		 */
   2163 		counts[PCI_INTR_TYPE_MSIX]
   2164 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2165 	} else {
   2166 		max_type = PCI_INTR_TYPE_MSI;
   2167 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2168 	}
   2169 
   2170 	/* Allocation settings */
   2171 	counts[PCI_INTR_TYPE_MSI] = 1;
   2172 	counts[PCI_INTR_TYPE_INTX] = 1;
   2173 	/* overridden by disable flags */
   2174 	if (wm_disable_msi != 0) {
   2175 		counts[PCI_INTR_TYPE_MSI] = 0;
   2176 		if (wm_disable_msix != 0) {
   2177 			max_type = PCI_INTR_TYPE_INTX;
   2178 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2179 		}
   2180 	} else if (wm_disable_msix != 0) {
   2181 		max_type = PCI_INTR_TYPE_MSI;
   2182 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2183 	}
   2184 
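         	/*
         	 * Allocate interrupts, preferring MSI-X, then MSI, then INTx.
         	 * If setup of the chosen type fails, release it and retry with
         	 * the next type down.
         	 */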
   2185 alloc_retry:
   2186 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2187 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2188 		return;
   2189 	}
   2190 
   2191 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2192 		error = wm_setup_msix(sc);
   2193 		if (error) {
   2194 			pci_intr_release(pc, sc->sc_intrs,
   2195 			    counts[PCI_INTR_TYPE_MSIX]);
   2196 
   2197 			/* Setup for MSI: Disable MSI-X */
   2198 			max_type = PCI_INTR_TYPE_MSI;
   2199 			counts[PCI_INTR_TYPE_MSI] = 1;
   2200 			counts[PCI_INTR_TYPE_INTX] = 1;
   2201 			goto alloc_retry;
   2202 		}
   2203 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2204 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2205 		error = wm_setup_legacy(sc);
   2206 		if (error) {
   2207 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2208 			    counts[PCI_INTR_TYPE_MSI]);
   2209 
   2210 			/* The next try is for INTx: Disable MSI */
   2211 			max_type = PCI_INTR_TYPE_INTX;
   2212 			counts[PCI_INTR_TYPE_INTX] = 1;
   2213 			goto alloc_retry;
   2214 		}
   2215 	} else {
   2216 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2217 		error = wm_setup_legacy(sc);
   2218 		if (error) {
   2219 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2220 			    counts[PCI_INTR_TYPE_INTX]);
   2221 			return;
   2222 		}
   2223 	}
   2224 
   2225 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2226 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2227 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2228 	    WM_WORKQUEUE_FLAGS);
   2229 	if (error) {
   2230 		aprint_error_dev(sc->sc_dev,
   2231 		    "unable to create TxRx workqueue\n");
   2232 		goto out;
   2233 	}
   2234 
   2235 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2236 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2237 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2238 	    WQ_MPSAFE);
   2239 	if (error) {
   2240 		workqueue_destroy(sc->sc_queue_wq);
   2241 		aprint_error_dev(sc->sc_dev,
   2242 		    "unable to create reset workqueue\n");
   2243 		goto out;
   2244 	}
   2245 
   2246 	/*
   2247 	 * Check the function ID (unit number of the chip).
   2248 	 */
   2249 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2250 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2251 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2252 	    || (sc->sc_type == WM_T_82580)
   2253 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2254 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2255 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2256 	else
   2257 		sc->sc_funcid = 0;
   2258 
   2259 	/*
   2260 	 * Determine a few things about the bus we're connected to.
   2261 	 */
   2262 	if (sc->sc_type < WM_T_82543) {
   2263 		/* We don't really know the bus characteristics here. */
   2264 		sc->sc_bus_speed = 33;
   2265 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2266 		/*
    2267 		 * CSA (Communication Streaming Architecture) is about as fast
    2268 		 * as a 32-bit 66MHz PCI bus.
   2269 		 */
   2270 		sc->sc_flags |= WM_F_CSA;
   2271 		sc->sc_bus_speed = 66;
   2272 		aprint_verbose_dev(sc->sc_dev,
   2273 		    "Communication Streaming Architecture\n");
   2274 		if (sc->sc_type == WM_T_82547) {
   2275 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2276 			callout_setfunc(&sc->sc_txfifo_ch,
   2277 			    wm_82547_txfifo_stall, sc);
   2278 			aprint_verbose_dev(sc->sc_dev,
   2279 			    "using 82547 Tx FIFO stall work-around\n");
   2280 		}
   2281 	} else if (sc->sc_type >= WM_T_82571) {
   2282 		sc->sc_flags |= WM_F_PCIE;
   2283 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2284 		    && (sc->sc_type != WM_T_ICH10)
   2285 		    && (sc->sc_type != WM_T_PCH)
   2286 		    && (sc->sc_type != WM_T_PCH2)
   2287 		    && (sc->sc_type != WM_T_PCH_LPT)
   2288 		    && (sc->sc_type != WM_T_PCH_SPT)
   2289 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2290 			/* ICH* and PCH* have no PCIe capability registers */
   2291 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2292 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2293 				NULL) == 0)
   2294 				aprint_error_dev(sc->sc_dev,
   2295 				    "unable to find PCIe capability\n");
   2296 		}
   2297 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2298 	} else {
   2299 		reg = CSR_READ(sc, WMREG_STATUS);
   2300 		if (reg & STATUS_BUS64)
   2301 			sc->sc_flags |= WM_F_BUS64;
   2302 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2303 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2304 
   2305 			sc->sc_flags |= WM_F_PCIX;
   2306 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2307 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2308 				aprint_error_dev(sc->sc_dev,
   2309 				    "unable to find PCIX capability\n");
   2310 			else if (sc->sc_type != WM_T_82545_3 &&
   2311 				 sc->sc_type != WM_T_82546_3) {
   2312 				/*
   2313 				 * Work around a problem caused by the BIOS
   2314 				 * setting the max memory read byte count
   2315 				 * incorrectly.
   2316 				 */
   2317 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2318 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2319 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2320 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2321 
   2322 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2323 				    PCIX_CMD_BYTECNT_SHIFT;
   2324 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2325 				    PCIX_STATUS_MAXB_SHIFT;
   2326 				if (bytecnt > maxb) {
   2327 					aprint_verbose_dev(sc->sc_dev,
   2328 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2329 					    512 << bytecnt, 512 << maxb);
   2330 					pcix_cmd = (pcix_cmd &
   2331 					    ~PCIX_CMD_BYTECNT_MASK) |
   2332 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2333 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2334 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2335 					    pcix_cmd);
   2336 				}
   2337 			}
   2338 		}
   2339 		/*
   2340 		 * The quad port adapter is special; it has a PCIX-PCIX
   2341 		 * bridge on the board, and can run the secondary bus at
   2342 		 * a higher speed.
   2343 		 */
   2344 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2345 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2346 								      : 66;
   2347 		} else if (sc->sc_flags & WM_F_PCIX) {
   2348 			switch (reg & STATUS_PCIXSPD_MASK) {
   2349 			case STATUS_PCIXSPD_50_66:
   2350 				sc->sc_bus_speed = 66;
   2351 				break;
   2352 			case STATUS_PCIXSPD_66_100:
   2353 				sc->sc_bus_speed = 100;
   2354 				break;
   2355 			case STATUS_PCIXSPD_100_133:
   2356 				sc->sc_bus_speed = 133;
   2357 				break;
   2358 			default:
   2359 				aprint_error_dev(sc->sc_dev,
   2360 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2361 				    reg & STATUS_PCIXSPD_MASK);
   2362 				sc->sc_bus_speed = 66;
   2363 				break;
   2364 			}
   2365 		} else
   2366 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2367 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2368 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2369 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2370 	}
   2371 
   2372 	/* clear interesting stat counters */
   2373 	CSR_READ(sc, WMREG_COLC);
   2374 	CSR_READ(sc, WMREG_RXERRC);
   2375 
   2376 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2377 	    || (sc->sc_type >= WM_T_ICH8))
   2378 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2379 	if (sc->sc_type >= WM_T_ICH8)
   2380 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2381 
    2382 	/* Set up NVM access functions/sizes and PHY/NVM semaphore hooks */
   2383 	switch (sc->sc_type) {
   2384 	case WM_T_82542_2_0:
   2385 	case WM_T_82542_2_1:
   2386 	case WM_T_82543:
   2387 	case WM_T_82544:
   2388 		/* Microwire */
   2389 		sc->nvm.read = wm_nvm_read_uwire;
   2390 		sc->sc_nvm_wordsize = 64;
   2391 		sc->sc_nvm_addrbits = 6;
   2392 		break;
   2393 	case WM_T_82540:
   2394 	case WM_T_82545:
   2395 	case WM_T_82545_3:
   2396 	case WM_T_82546:
   2397 	case WM_T_82546_3:
   2398 		/* Microwire */
   2399 		sc->nvm.read = wm_nvm_read_uwire;
   2400 		reg = CSR_READ(sc, WMREG_EECD);
   2401 		if (reg & EECD_EE_SIZE) {
   2402 			sc->sc_nvm_wordsize = 256;
   2403 			sc->sc_nvm_addrbits = 8;
   2404 		} else {
   2405 			sc->sc_nvm_wordsize = 64;
   2406 			sc->sc_nvm_addrbits = 6;
   2407 		}
   2408 		sc->sc_flags |= WM_F_LOCK_EECD;
   2409 		sc->nvm.acquire = wm_get_eecd;
   2410 		sc->nvm.release = wm_put_eecd;
   2411 		break;
   2412 	case WM_T_82541:
   2413 	case WM_T_82541_2:
   2414 	case WM_T_82547:
   2415 	case WM_T_82547_2:
   2416 		reg = CSR_READ(sc, WMREG_EECD);
   2417 		/*
    2418 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2419 		 * 8254[17], so set the flags and functions before calling it.
   2420 		 */
   2421 		sc->sc_flags |= WM_F_LOCK_EECD;
   2422 		sc->nvm.acquire = wm_get_eecd;
   2423 		sc->nvm.release = wm_put_eecd;
   2424 		if (reg & EECD_EE_TYPE) {
   2425 			/* SPI */
   2426 			sc->nvm.read = wm_nvm_read_spi;
   2427 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2428 			wm_nvm_set_addrbits_size_eecd(sc);
   2429 		} else {
   2430 			/* Microwire */
   2431 			sc->nvm.read = wm_nvm_read_uwire;
   2432 			if ((reg & EECD_EE_ABITS) != 0) {
   2433 				sc->sc_nvm_wordsize = 256;
   2434 				sc->sc_nvm_addrbits = 8;
   2435 			} else {
   2436 				sc->sc_nvm_wordsize = 64;
   2437 				sc->sc_nvm_addrbits = 6;
   2438 			}
   2439 		}
   2440 		break;
   2441 	case WM_T_82571:
   2442 	case WM_T_82572:
   2443 		/* SPI */
   2444 		sc->nvm.read = wm_nvm_read_eerd;
    2445 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2446 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2447 		wm_nvm_set_addrbits_size_eecd(sc);
   2448 		sc->phy.acquire = wm_get_swsm_semaphore;
   2449 		sc->phy.release = wm_put_swsm_semaphore;
   2450 		sc->nvm.acquire = wm_get_nvm_82571;
   2451 		sc->nvm.release = wm_put_nvm_82571;
   2452 		break;
   2453 	case WM_T_82573:
   2454 	case WM_T_82574:
   2455 	case WM_T_82583:
   2456 		sc->nvm.read = wm_nvm_read_eerd;
    2457 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2458 		if (sc->sc_type == WM_T_82573) {
   2459 			sc->phy.acquire = wm_get_swsm_semaphore;
   2460 			sc->phy.release = wm_put_swsm_semaphore;
   2461 			sc->nvm.acquire = wm_get_nvm_82571;
   2462 			sc->nvm.release = wm_put_nvm_82571;
   2463 		} else {
   2464 			/* Both PHY and NVM use the same semaphore. */
   2465 			sc->phy.acquire = sc->nvm.acquire
   2466 			    = wm_get_swfwhw_semaphore;
   2467 			sc->phy.release = sc->nvm.release
   2468 			    = wm_put_swfwhw_semaphore;
   2469 		}
   2470 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2471 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2472 			sc->sc_nvm_wordsize = 2048;
   2473 		} else {
   2474 			/* SPI */
   2475 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2476 			wm_nvm_set_addrbits_size_eecd(sc);
   2477 		}
   2478 		break;
   2479 	case WM_T_82575:
   2480 	case WM_T_82576:
   2481 	case WM_T_82580:
   2482 	case WM_T_I350:
   2483 	case WM_T_I354:
   2484 	case WM_T_80003:
   2485 		/* SPI */
   2486 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2487 		wm_nvm_set_addrbits_size_eecd(sc);
   2488 		if ((sc->sc_type == WM_T_80003)
   2489 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2490 			sc->nvm.read = wm_nvm_read_eerd;
   2491 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2492 		} else {
   2493 			sc->nvm.read = wm_nvm_read_spi;
   2494 			sc->sc_flags |= WM_F_LOCK_EECD;
   2495 		}
   2496 		sc->phy.acquire = wm_get_phy_82575;
   2497 		sc->phy.release = wm_put_phy_82575;
   2498 		sc->nvm.acquire = wm_get_nvm_80003;
   2499 		sc->nvm.release = wm_put_nvm_80003;
   2500 		break;
   2501 	case WM_T_ICH8:
   2502 	case WM_T_ICH9:
   2503 	case WM_T_ICH10:
   2504 	case WM_T_PCH:
   2505 	case WM_T_PCH2:
   2506 	case WM_T_PCH_LPT:
   2507 		sc->nvm.read = wm_nvm_read_ich8;
   2508 		/* FLASH */
   2509 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2510 		sc->sc_nvm_wordsize = 2048;
   2511 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2512 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2513 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2514 			aprint_error_dev(sc->sc_dev,
   2515 			    "can't map FLASH registers\n");
   2516 			goto out;
   2517 		}
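         		/*
         		 * Derive the NVM region base and the per-bank size (in
         		 * 16-bit words; the region holds two banks) from the
         		 * GFPREG base/limit fields.
         		 */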
   2518 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2519 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2520 		    ICH_FLASH_SECTOR_SIZE;
   2521 		sc->sc_ich8_flash_bank_size =
   2522 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2523 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2524 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2525 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2526 		sc->sc_flashreg_offset = 0;
   2527 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2528 		sc->phy.release = wm_put_swflag_ich8lan;
   2529 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2530 		sc->nvm.release = wm_put_nvm_ich8lan;
   2531 		break;
   2532 	case WM_T_PCH_SPT:
   2533 	case WM_T_PCH_CNP:
   2534 		sc->nvm.read = wm_nvm_read_spt;
   2535 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2536 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2537 		sc->sc_flasht = sc->sc_st;
   2538 		sc->sc_flashh = sc->sc_sh;
   2539 		sc->sc_ich8_flash_base = 0;
   2540 		sc->sc_nvm_wordsize =
   2541 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2542 		    * NVM_SIZE_MULTIPLIER;
    2543 		/* It is the size in bytes; we want words */
   2544 		sc->sc_nvm_wordsize /= 2;
   2545 		/* Assume 2 banks */
   2546 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2547 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2548 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2549 		sc->phy.release = wm_put_swflag_ich8lan;
   2550 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2551 		sc->nvm.release = wm_put_nvm_ich8lan;
   2552 		break;
   2553 	case WM_T_I210:
   2554 	case WM_T_I211:
    2555 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2556 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2557 		if (wm_nvm_flash_presence_i210(sc)) {
   2558 			sc->nvm.read = wm_nvm_read_eerd;
   2559 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2560 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2561 			wm_nvm_set_addrbits_size_eecd(sc);
   2562 		} else {
   2563 			sc->nvm.read = wm_nvm_read_invm;
   2564 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2565 			sc->sc_nvm_wordsize = INVM_SIZE;
   2566 		}
   2567 		sc->phy.acquire = wm_get_phy_82575;
   2568 		sc->phy.release = wm_put_phy_82575;
   2569 		sc->nvm.acquire = wm_get_nvm_80003;
   2570 		sc->nvm.release = wm_put_nvm_80003;
   2571 		break;
   2572 	default:
   2573 		break;
   2574 	}
   2575 
   2576 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2577 	switch (sc->sc_type) {
   2578 	case WM_T_82571:
   2579 	case WM_T_82572:
   2580 		reg = CSR_READ(sc, WMREG_SWSM2);
   2581 		if ((reg & SWSM2_LOCK) == 0) {
   2582 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2583 			force_clear_smbi = true;
   2584 		} else
   2585 			force_clear_smbi = false;
   2586 		break;
   2587 	case WM_T_82573:
   2588 	case WM_T_82574:
   2589 	case WM_T_82583:
   2590 		force_clear_smbi = true;
   2591 		break;
   2592 	default:
   2593 		force_clear_smbi = false;
   2594 		break;
   2595 	}
   2596 	if (force_clear_smbi) {
   2597 		reg = CSR_READ(sc, WMREG_SWSM);
   2598 		if ((reg & SWSM_SMBI) != 0)
   2599 			aprint_error_dev(sc->sc_dev,
   2600 			    "Please update the Bootagent\n");
   2601 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2602 	}
   2603 
   2604 	/*
    2605 	 * Defer printing the EEPROM type until after verifying the checksum.
   2606 	 * This allows the EEPROM type to be printed correctly in the case
   2607 	 * that no EEPROM is attached.
   2608 	 */
   2609 	/*
   2610 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2611 	 * this for later, so we can fail future reads from the EEPROM.
   2612 	 */
   2613 	if (wm_nvm_validate_checksum(sc)) {
   2614 		/*
    2615 		 * Check again because some PCI-e parts fail the
    2616 		 * first check due to the link being in a sleep state.
   2617 		 */
   2618 		if (wm_nvm_validate_checksum(sc))
   2619 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2620 	}
   2621 
   2622 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2623 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2624 	else {
   2625 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2626 		    sc->sc_nvm_wordsize);
   2627 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2628 			aprint_verbose("iNVM");
   2629 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2630 			aprint_verbose("FLASH(HW)");
   2631 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2632 			aprint_verbose("FLASH");
   2633 		else {
   2634 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2635 				eetype = "SPI";
   2636 			else
   2637 				eetype = "MicroWire";
   2638 			aprint_verbose("(%d address bits) %s EEPROM",
   2639 			    sc->sc_nvm_addrbits, eetype);
   2640 		}
   2641 	}
   2642 	wm_nvm_version(sc);
   2643 	aprint_verbose("\n");
   2644 
   2645 	/*
   2646 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2647 	 * incorrect.
   2648 	 */
   2649 	wm_gmii_setup_phytype(sc, 0, 0);
   2650 
   2651 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2652 	switch (sc->sc_type) {
   2653 	case WM_T_ICH8:
   2654 	case WM_T_ICH9:
   2655 	case WM_T_ICH10:
   2656 	case WM_T_PCH:
   2657 	case WM_T_PCH2:
   2658 	case WM_T_PCH_LPT:
   2659 	case WM_T_PCH_SPT:
   2660 	case WM_T_PCH_CNP:
   2661 		apme_mask = WUC_APME;
   2662 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2663 		if ((eeprom_data & apme_mask) != 0)
   2664 			sc->sc_flags |= WM_F_WOL;
   2665 		break;
   2666 	default:
   2667 		break;
   2668 	}
   2669 
   2670 	/* Reset the chip to a known state. */
   2671 	wm_reset(sc);
   2672 
   2673 	/*
   2674 	 * Check for I21[01] PLL workaround.
   2675 	 *
   2676 	 * Three cases:
   2677 	 * a) Chip is I211.
   2678 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2679 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2680 	 */
   2681 	if (sc->sc_type == WM_T_I211)
   2682 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2683 	if (sc->sc_type == WM_T_I210) {
   2684 		if (!wm_nvm_flash_presence_i210(sc))
   2685 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2686 		else if ((sc->sc_nvm_ver_major < 3)
   2687 		    || ((sc->sc_nvm_ver_major == 3)
   2688 			&& (sc->sc_nvm_ver_minor < 25))) {
   2689 			aprint_verbose_dev(sc->sc_dev,
   2690 			    "ROM image version %d.%d is older than 3.25\n",
   2691 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2692 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2693 		}
   2694 	}
   2695 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2696 		wm_pll_workaround_i210(sc);
   2697 
   2698 	wm_get_wakeup(sc);
   2699 
   2700 	/* Non-AMT based hardware can now take control from firmware */
   2701 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2702 		wm_get_hw_control(sc);
   2703 
   2704 	/*
    2705 	 * Read the Ethernet address from the EEPROM, unless it was already
    2706 	 * found in the device properties.
   2707 	 */
   2708 	ea = prop_dictionary_get(dict, "mac-address");
   2709 	if (ea != NULL) {
   2710 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2711 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2712 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2713 	} else {
   2714 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2715 			aprint_error_dev(sc->sc_dev,
   2716 			    "unable to read Ethernet address\n");
   2717 			goto out;
   2718 		}
   2719 	}
   2720 
   2721 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2722 	    ether_sprintf(enaddr));
   2723 
   2724 	/*
   2725 	 * Read the config info from the EEPROM, and set up various
   2726 	 * bits in the control registers based on their contents.
   2727 	 */
   2728 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2729 	if (pn != NULL) {
   2730 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2731 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2732 	} else {
   2733 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2734 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2735 			goto out;
   2736 		}
   2737 	}
   2738 
   2739 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2740 	if (pn != NULL) {
   2741 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2742 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2743 	} else {
   2744 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2745 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2746 			goto out;
   2747 		}
   2748 	}
   2749 
    2750 	/* Check for WM_F_WOL */
   2751 	switch (sc->sc_type) {
   2752 	case WM_T_82542_2_0:
   2753 	case WM_T_82542_2_1:
   2754 	case WM_T_82543:
   2755 		/* dummy? */
   2756 		eeprom_data = 0;
   2757 		apme_mask = NVM_CFG3_APME;
   2758 		break;
   2759 	case WM_T_82544:
   2760 		apme_mask = NVM_CFG2_82544_APM_EN;
   2761 		eeprom_data = cfg2;
   2762 		break;
   2763 	case WM_T_82546:
   2764 	case WM_T_82546_3:
   2765 	case WM_T_82571:
   2766 	case WM_T_82572:
   2767 	case WM_T_82573:
   2768 	case WM_T_82574:
   2769 	case WM_T_82583:
   2770 	case WM_T_80003:
   2771 	case WM_T_82575:
   2772 	case WM_T_82576:
   2773 		apme_mask = NVM_CFG3_APME;
   2774 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2775 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2776 		break;
   2777 	case WM_T_82580:
   2778 	case WM_T_I350:
   2779 	case WM_T_I354:
   2780 	case WM_T_I210:
   2781 	case WM_T_I211:
   2782 		apme_mask = NVM_CFG3_APME;
   2783 		wm_nvm_read(sc,
   2784 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2785 		    1, &eeprom_data);
   2786 		break;
   2787 	case WM_T_ICH8:
   2788 	case WM_T_ICH9:
   2789 	case WM_T_ICH10:
   2790 	case WM_T_PCH:
   2791 	case WM_T_PCH2:
   2792 	case WM_T_PCH_LPT:
   2793 	case WM_T_PCH_SPT:
   2794 	case WM_T_PCH_CNP:
    2795 		/* Already checked before wm_reset() */
   2796 		apme_mask = eeprom_data = 0;
   2797 		break;
   2798 	default: /* XXX 82540 */
   2799 		apme_mask = NVM_CFG3_APME;
   2800 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2801 		break;
   2802 	}
    2803 	/* Set the WM_F_WOL flag based on the EEPROM settings read above */
   2804 	if ((eeprom_data & apme_mask) != 0)
   2805 		sc->sc_flags |= WM_F_WOL;
   2806 
   2807 	/*
    2808 	 * We have the EEPROM settings; now apply the special cases
    2809 	 * where the EEPROM may be wrong or the board doesn't support
    2810 	 * wake-on-LAN on a particular port.
   2811 	 */
   2812 	switch (sc->sc_pcidevid) {
   2813 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2814 		sc->sc_flags &= ~WM_F_WOL;
   2815 		break;
   2816 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2817 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2818 		/* Wake events are only supported on port A for dual fiber
    2819 		 * regardless of the EEPROM setting */
   2820 		if (sc->sc_funcid == 1)
   2821 			sc->sc_flags &= ~WM_F_WOL;
   2822 		break;
   2823 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2824 		/* If quad port adapter, disable WoL on all but port A */
   2825 		if (sc->sc_funcid != 0)
   2826 			sc->sc_flags &= ~WM_F_WOL;
   2827 		break;
   2828 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2829 		/* Wake events are only supported on port A for dual fiber
    2830 		 * regardless of the EEPROM setting */
   2831 		if (sc->sc_funcid == 1)
   2832 			sc->sc_flags &= ~WM_F_WOL;
   2833 		break;
   2834 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2835 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2836 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2837 		/* If quad port adapter, disable WoL on all but port A */
   2838 		if (sc->sc_funcid != 0)
   2839 			sc->sc_flags &= ~WM_F_WOL;
   2840 		break;
   2841 	}
   2842 
   2843 	if (sc->sc_type >= WM_T_82575) {
   2844 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2845 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2846 			    nvmword);
   2847 			if ((sc->sc_type == WM_T_82575) ||
   2848 			    (sc->sc_type == WM_T_82576)) {
   2849 				/* Check NVM for autonegotiation */
   2850 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2851 				    != 0)
   2852 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2853 			}
   2854 			if ((sc->sc_type == WM_T_82575) ||
   2855 			    (sc->sc_type == WM_T_I350)) {
   2856 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2857 					sc->sc_flags |= WM_F_MAS;
   2858 			}
   2859 		}
   2860 	}
   2861 
   2862 	/*
    2863 	 * XXX Some multi-port cards need special handling to disable
    2864 	 * a particular port.
   2865 	 */
   2866 
   2867 	if (sc->sc_type >= WM_T_82544) {
   2868 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2869 		if (pn != NULL) {
   2870 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2871 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2872 		} else {
   2873 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2874 				aprint_error_dev(sc->sc_dev,
   2875 				    "unable to read SWDPIN\n");
   2876 				goto out;
   2877 			}
   2878 		}
   2879 	}
   2880 
   2881 	if (cfg1 & NVM_CFG1_ILOS)
   2882 		sc->sc_ctrl |= CTRL_ILOS;
   2883 
   2884 	/*
   2885 	 * XXX
    2886 	 * This code isn't correct because pins 2 and 3 are located
    2887 	 * at different positions on newer chips. Check all the datasheets.
    2888 	 *
    2889 	 * Until this is resolved, only do it for chips up to the 82580.
   2890 	 */
   2891 	if (sc->sc_type <= WM_T_82580) {
   2892 		if (sc->sc_type >= WM_T_82544) {
   2893 			sc->sc_ctrl |=
   2894 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2895 			    CTRL_SWDPIO_SHIFT;
   2896 			sc->sc_ctrl |=
   2897 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2898 			    CTRL_SWDPINS_SHIFT;
   2899 		} else {
   2900 			sc->sc_ctrl |=
   2901 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2902 			    CTRL_SWDPIO_SHIFT;
   2903 		}
   2904 	}
   2905 
   2906 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2907 		wm_nvm_read(sc,
   2908 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2909 		    1, &nvmword);
   2910 		if (nvmword & NVM_CFG3_ILOS)
   2911 			sc->sc_ctrl |= CTRL_ILOS;
   2912 	}
   2913 
   2914 #if 0
   2915 	if (sc->sc_type >= WM_T_82544) {
   2916 		if (cfg1 & NVM_CFG1_IPS0)
   2917 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2918 		if (cfg1 & NVM_CFG1_IPS1)
   2919 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2920 		sc->sc_ctrl_ext |=
   2921 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2922 		    CTRL_EXT_SWDPIO_SHIFT;
   2923 		sc->sc_ctrl_ext |=
   2924 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2925 		    CTRL_EXT_SWDPINS_SHIFT;
   2926 	} else {
   2927 		sc->sc_ctrl_ext |=
   2928 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2929 		    CTRL_EXT_SWDPIO_SHIFT;
   2930 	}
   2931 #endif
   2932 
   2933 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2934 #if 0
   2935 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2936 #endif
   2937 
   2938 	if (sc->sc_type == WM_T_PCH) {
   2939 		uint16_t val;
   2940 
   2941 		/* Save the NVM K1 bit setting */
   2942 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2943 
   2944 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2945 			sc->sc_nvm_k1_enabled = 1;
   2946 		else
   2947 			sc->sc_nvm_k1_enabled = 0;
   2948 	}
   2949 
   2950 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2951 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2952 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2953 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2954 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2955 	    || sc->sc_type == WM_T_82573
   2956 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2957 		/* Copper only */
   2958 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2959 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2960 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2961 	    || (sc->sc_type == WM_T_I211)) {
   2962 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2963 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2964 		switch (link_mode) {
   2965 		case CTRL_EXT_LINK_MODE_1000KX:
   2966 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2967 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2968 			break;
   2969 		case CTRL_EXT_LINK_MODE_SGMII:
   2970 			if (wm_sgmii_uses_mdio(sc)) {
   2971 				aprint_normal_dev(sc->sc_dev,
   2972 				    "SGMII(MDIO)\n");
   2973 				sc->sc_flags |= WM_F_SGMII;
   2974 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2975 				break;
   2976 			}
   2977 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2978 			/*FALLTHROUGH*/
   2979 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2980 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2981 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2982 				if (link_mode
   2983 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2984 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2985 					sc->sc_flags |= WM_F_SGMII;
   2986 					aprint_verbose_dev(sc->sc_dev,
   2987 					    "SGMII\n");
   2988 				} else {
   2989 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2990 					aprint_verbose_dev(sc->sc_dev,
   2991 					    "SERDES\n");
   2992 				}
   2993 				break;
   2994 			}
   2995 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2996 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2997 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2998 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2999 				sc->sc_flags |= WM_F_SGMII;
   3000 			}
   3001 			/* Do not change link mode for 100BaseFX */
   3002 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   3003 				break;
   3004 
   3005 			/* Change current link mode setting */
   3006 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   3007 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3008 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   3009 			else
   3010 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   3011 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3012 			break;
   3013 		case CTRL_EXT_LINK_MODE_GMII:
   3014 		default:
   3015 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   3016 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3017 			break;
   3018 		}
   3019 
   3020 		reg &= ~CTRL_EXT_I2C_ENA;
   3021 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   3022 			reg |= CTRL_EXT_I2C_ENA;
   3023 		else
   3024 			reg &= ~CTRL_EXT_I2C_ENA;
   3025 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3026 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3027 			if (!wm_sgmii_uses_mdio(sc))
   3028 				wm_gmii_setup_phytype(sc, 0, 0);
   3029 			wm_reset_mdicnfg_82580(sc);
   3030 		}
   3031 	} else if (sc->sc_type < WM_T_82543 ||
   3032 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3033 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3034 			aprint_error_dev(sc->sc_dev,
   3035 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3036 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3037 		}
   3038 	} else {
   3039 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3040 			aprint_error_dev(sc->sc_dev,
   3041 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3042 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3043 		}
   3044 	}
   3045 
   3046 	if (sc->sc_type >= WM_T_PCH2)
   3047 		sc->sc_flags |= WM_F_EEE;
   3048 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3049 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
    3050 		/* XXX: The I354 needs special handling (not implemented yet). */
   3051 		if (sc->sc_type != WM_T_I354)
   3052 			sc->sc_flags |= WM_F_EEE;
   3053 	}
   3054 
   3055 	/*
   3056 	 * The I350 has a bug where it always strips the CRC whether
    3057 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   3058 	 */
   3059 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3060 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3061 		sc->sc_flags |= WM_F_CRC_STRIP;
   3062 
   3063 	/* Set device properties (macflags) */
   3064 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3065 
   3066 	if (sc->sc_flags != 0) {
   3067 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3068 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3069 	}
   3070 
   3071 #ifdef WM_MPSAFE
   3072 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3073 #else
   3074 	sc->sc_core_lock = NULL;
   3075 #endif
   3076 
   3077 	/* Initialize the media structures accordingly. */
   3078 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3079 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3080 	else
   3081 		wm_tbi_mediainit(sc); /* All others */
   3082 
   3083 	ifp = &sc->sc_ethercom.ec_if;
   3084 	xname = device_xname(sc->sc_dev);
   3085 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3086 	ifp->if_softc = sc;
   3087 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3088 #ifdef WM_MPSAFE
   3089 	ifp->if_extflags = IFEF_MPSAFE;
   3090 #endif
   3091 	ifp->if_ioctl = wm_ioctl;
   3092 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3093 		ifp->if_start = wm_nq_start;
   3094 		/*
   3095 		 * When the number of CPUs is one and the controller can use
    3096 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3097 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3098 		 * the other for link status changes.
   3099 		 * In this situation, wm_nq_transmit() is disadvantageous
   3100 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3101 		 */
   3102 		if (wm_is_using_multiqueue(sc))
   3103 			ifp->if_transmit = wm_nq_transmit;
   3104 	} else {
   3105 		ifp->if_start = wm_start;
   3106 		/*
   3107 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3108 		 * described above.
   3109 		 */
   3110 		if (wm_is_using_multiqueue(sc))
   3111 			ifp->if_transmit = wm_transmit;
   3112 	}
    3113 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as its watchdog. */
   3114 	ifp->if_init = wm_init;
   3115 	ifp->if_stop = wm_stop;
   3116 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3117 	IFQ_SET_READY(&ifp->if_snd);
   3118 
   3119 	/* Check for jumbo frame */
   3120 	switch (sc->sc_type) {
   3121 	case WM_T_82573:
   3122 		/* XXX limited to 9234 if ASPM is disabled */
   3123 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3124 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3125 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3126 		break;
   3127 	case WM_T_82571:
   3128 	case WM_T_82572:
   3129 	case WM_T_82574:
   3130 	case WM_T_82583:
   3131 	case WM_T_82575:
   3132 	case WM_T_82576:
   3133 	case WM_T_82580:
   3134 	case WM_T_I350:
   3135 	case WM_T_I354:
   3136 	case WM_T_I210:
   3137 	case WM_T_I211:
   3138 	case WM_T_80003:
   3139 	case WM_T_ICH9:
   3140 	case WM_T_ICH10:
   3141 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3142 	case WM_T_PCH_LPT:
   3143 	case WM_T_PCH_SPT:
   3144 	case WM_T_PCH_CNP:
   3145 		/* XXX limited to 9234 */
   3146 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3147 		break;
   3148 	case WM_T_PCH:
   3149 		/* XXX limited to 4096 */
   3150 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3151 		break;
   3152 	case WM_T_82542_2_0:
   3153 	case WM_T_82542_2_1:
   3154 	case WM_T_ICH8:
   3155 		/* No support for jumbo frame */
   3156 		break;
   3157 	default:
   3158 		/* ETHER_MAX_LEN_JUMBO */
   3159 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3160 		break;
   3161 	}
   3162 
    3163 	/* If we're an i82543 or greater, we can support VLANs. */
   3164 	if (sc->sc_type >= WM_T_82543) {
   3165 		sc->sc_ethercom.ec_capabilities |=
   3166 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3167 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3168 	}
   3169 
   3170 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3171 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3172 
   3173 	/*
    3174 	 * We can perform IPv4, TCPv4 and UDPv4 checksum offload (and
    3175 	 * TCPv6/UDPv6 transmit checksums). Only on i82543 and later.
   3176 	 */
   3177 	if (sc->sc_type >= WM_T_82543) {
   3178 		ifp->if_capabilities |=
   3179 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3180 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3181 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3182 		    IFCAP_CSUM_TCPv6_Tx |
   3183 		    IFCAP_CSUM_UDPv6_Tx;
   3184 	}
   3185 
   3186 	/*
    3187 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3188 	 *
   3189 	 *	82541GI (8086:1076) ... no
   3190 	 *	82572EI (8086:10b9) ... yes
   3191 	 */
   3192 	if (sc->sc_type >= WM_T_82571) {
   3193 		ifp->if_capabilities |=
   3194 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3195 	}
   3196 
   3197 	/*
    3198 	 * If we're an i82544 or greater (except i82547), we can do
   3199 	 * TCP segmentation offload.
   3200 	 */
   3201 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3202 		ifp->if_capabilities |= IFCAP_TSOv4;
   3203 
   3204 	if (sc->sc_type >= WM_T_82571)
   3205 		ifp->if_capabilities |= IFCAP_TSOv6;
   3206 
   3207 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3208 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3209 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3210 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3211 
   3212 	/* Attach the interface. */
   3213 	if_initialize(ifp);
   3214 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3215 	ether_ifattach(ifp, enaddr);
   3216 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3217 	if_register(ifp);
   3218 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3219 	    RND_FLAG_DEFAULT);
   3220 
   3221 #ifdef WM_EVENT_COUNTERS
   3222 	/* Attach event counters. */
   3223 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3224 	    NULL, xname, "linkintr");
   3225 
   3226 	if (sc->sc_type >= WM_T_82542_2_1) {
   3227 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3228 		    NULL, xname, "tx_xoff");
   3229 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3230 		    NULL, xname, "tx_xon");
   3231 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3232 		    NULL, xname, "rx_xoff");
   3233 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3234 		    NULL, xname, "rx_xon");
   3235 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3236 		    NULL, xname, "rx_macctl");
   3237 	}
   3238 
   3239 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3240 	    NULL, xname, "CRC Error");
   3241 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3242 	    NULL, xname, "Symbol Error");
   3243 
   3244 	if (sc->sc_type >= WM_T_82543) {
   3245 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3246 		    NULL, xname, "Alignment Error");
   3247 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3248 		    NULL, xname, "Receive Error");
   3249 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
   3250 		    NULL, xname, "Carrier Extension Error");
   3251 	}
   3252 
   3253 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3254 	    NULL, xname, "Missed Packets");
   3255 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3256 	    NULL, xname, "Collision");
   3257 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3258 	    NULL, xname, "Sequence Error");
   3259 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3260 	    NULL, xname, "Receive Length Error");
   3261 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3262 	    NULL, xname, "Single Collision");
   3263 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3264 	    NULL, xname, "Excessive Collisions");
   3265 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3266 	    NULL, xname, "Multiple Collision");
   3267 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3268 	    NULL, xname, "Late Collisions");
   3269 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3270 	    NULL, xname, "Defer");
   3271 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3272 	    NULL, xname, "Good Packets Rx");
   3273 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3274 	    NULL, xname, "Broadcast Packets Rx");
   3275 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3276 	    NULL, xname, "Multicast Packets Rx");
   3277 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3278 	    NULL, xname, "Good Packets Tx");
   3279 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3280 	    NULL, xname, "Good Octets Rx");
   3281 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3282 	    NULL, xname, "Good Octets Tx");
   3283 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3284 	    NULL, xname, "Rx No Buffers");
   3285 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3286 	    NULL, xname, "Rx Undersize");
   3287 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3288 	    NULL, xname, "Rx Fragment");
   3289 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3290 	    NULL, xname, "Rx Oversize");
   3291 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3292 	    NULL, xname, "Rx Jabber");
   3293 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3294 	    NULL, xname, "Total Octets Rx");
   3295 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3296 	    NULL, xname, "Total Octets Tx");
   3297 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3298 	    NULL, xname, "Total Packets Rx");
   3299 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3300 	    NULL, xname, "Total Packets Tx");
   3301 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3302 	    NULL, xname, "Multicast Packets Tx");
   3303 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3304 	    NULL, xname, "Broadcast Packets Tx Count");
   3305 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3306 	    NULL, xname, "Packets Rx (64 bytes)");
   3307 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3308 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3309 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3310 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3311 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
    3312 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3313 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3314 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3315 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3316 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3317 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3318 	    NULL, xname, "Packets Tx (64 bytes)");
   3319 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3320 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3321 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3322 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3323 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3324 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3325 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3326 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3327 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
    3328 	    NULL, xname, "Packets Tx (1024-1522 bytes)");
   3329 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3330 	    NULL, xname, "Interrupt Assertion");
   3331 	evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3332 	    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3333 	evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3334 	    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3335 	evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3336 	    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3337 	evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3338 	    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3339 	evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3340 	    NULL, xname, "Intr. Cause Tx Queue Empty");
   3341 	evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3342 	    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3343 	evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
   3344 	    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3345 	evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3346 	    NULL, xname, "Interrupt Cause Receiver Overrun");
   3347 	if (sc->sc_type >= WM_T_82543) {
   3348 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3349 		    NULL, xname, "Tx with No CRS");
   3350 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3351 		    NULL, xname, "TCP Segmentation Context Tx");
   3352 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
   3353 		    NULL, xname, "TCP Segmentation Context Tx Fail");
   3354 	}
   3355 	if (sc->sc_type >= WM_T_82540) {
   3356 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3357 		    NULL, xname, "Management Packets RX");
   3358 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3359 		    NULL, xname, "Management Packets Dropped");
   3360 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3361 		    NULL, xname, "Management Packets TX");
   3362 	}
   3363 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3364 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3365 		    NULL, xname, "BMC2OS Packets received by host");
   3366 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3367 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3368 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3369 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3370 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3371 		    NULL, xname, "OS2BMC Packets received by BMC");
   3372 	}
   3373 #endif /* WM_EVENT_COUNTERS */
   3374 
   3375 	sc->sc_txrx_use_workqueue = false;
   3376 
   3377 	if (wm_phy_need_linkdown_discard(sc)) {
   3378 		DPRINTF(sc, WM_DEBUG_LINK,
   3379 		    ("%s: %s: Set linkdown discard flag\n",
   3380 			device_xname(sc->sc_dev), __func__));
   3381 		wm_set_linkdown_discard(sc);
   3382 	}
   3383 
   3384 	wm_init_sysctls(sc);
   3385 
   3386 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3387 		pmf_class_network_register(self, ifp);
   3388 	else
   3389 		aprint_error_dev(self, "couldn't establish power handler\n");
   3390 
   3391 	sc->sc_flags |= WM_F_ATTACHED;
   3392 out:
   3393 	return;
   3394 }
   3395 
   3396 /* The detach function (ca_detach) */
   3397 static int
   3398 wm_detach(device_t self, int flags __unused)
   3399 {
   3400 	struct wm_softc *sc = device_private(self);
   3401 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3402 	int i;
   3403 
   3404 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3405 		return 0;
   3406 
    3407 	/* Stop the interface. Callouts are stopped in wm_stop(). */
   3408 	IFNET_LOCK(ifp);
   3409 	sc->sc_dying = true;
   3410 	wm_stop(ifp, 1);
   3411 	IFNET_UNLOCK(ifp);
   3412 
   3413 	pmf_device_deregister(self);
   3414 
   3415 	sysctl_teardown(&sc->sc_sysctllog);
   3416 
   3417 #ifdef WM_EVENT_COUNTERS
   3418 	evcnt_detach(&sc->sc_ev_linkintr);
   3419 
   3420 	if (sc->sc_type >= WM_T_82542_2_1) {
   3421 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3422 		evcnt_detach(&sc->sc_ev_tx_xon);
   3423 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3424 		evcnt_detach(&sc->sc_ev_rx_xon);
   3425 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3426 	}
   3427 
   3428 	evcnt_detach(&sc->sc_ev_crcerrs);
   3429 	evcnt_detach(&sc->sc_ev_symerrc);
   3430 
   3431 	if (sc->sc_type >= WM_T_82543) {
   3432 		evcnt_detach(&sc->sc_ev_algnerrc);
   3433 		evcnt_detach(&sc->sc_ev_rxerrc);
   3434 		evcnt_detach(&sc->sc_ev_cexterr);
   3435 	}
   3436 	evcnt_detach(&sc->sc_ev_mpc);
   3437 	evcnt_detach(&sc->sc_ev_colc);
   3438 	evcnt_detach(&sc->sc_ev_sec);
   3439 	evcnt_detach(&sc->sc_ev_rlec);
   3440 	evcnt_detach(&sc->sc_ev_scc);
   3441 	evcnt_detach(&sc->sc_ev_ecol);
   3442 	evcnt_detach(&sc->sc_ev_mcc);
   3443 	evcnt_detach(&sc->sc_ev_latecol);
   3444 	evcnt_detach(&sc->sc_ev_dc);
   3445 	evcnt_detach(&sc->sc_ev_gprc);
   3446 	evcnt_detach(&sc->sc_ev_bprc);
   3447 	evcnt_detach(&sc->sc_ev_mprc);
   3448 	evcnt_detach(&sc->sc_ev_gptc);
   3449 	evcnt_detach(&sc->sc_ev_gorc);
   3450 	evcnt_detach(&sc->sc_ev_gotc);
   3451 	evcnt_detach(&sc->sc_ev_rnbc);
   3452 	evcnt_detach(&sc->sc_ev_ruc);
   3453 	evcnt_detach(&sc->sc_ev_rfc);
   3454 	evcnt_detach(&sc->sc_ev_roc);
   3455 	evcnt_detach(&sc->sc_ev_rjc);
   3456 	evcnt_detach(&sc->sc_ev_tor);
   3457 	evcnt_detach(&sc->sc_ev_tot);
   3458 	evcnt_detach(&sc->sc_ev_tpr);
   3459 	evcnt_detach(&sc->sc_ev_tpt);
   3460 	evcnt_detach(&sc->sc_ev_mptc);
   3461 	evcnt_detach(&sc->sc_ev_bptc);
   3462 	evcnt_detach(&sc->sc_ev_prc64);
   3463 	evcnt_detach(&sc->sc_ev_prc127);
   3464 	evcnt_detach(&sc->sc_ev_prc255);
   3465 	evcnt_detach(&sc->sc_ev_prc511);
   3466 	evcnt_detach(&sc->sc_ev_prc1023);
   3467 	evcnt_detach(&sc->sc_ev_prc1522);
   3468 	evcnt_detach(&sc->sc_ev_ptc64);
   3469 	evcnt_detach(&sc->sc_ev_ptc127);
   3470 	evcnt_detach(&sc->sc_ev_ptc255);
   3471 	evcnt_detach(&sc->sc_ev_ptc511);
   3472 	evcnt_detach(&sc->sc_ev_ptc1023);
   3473 	evcnt_detach(&sc->sc_ev_ptc1522);
   3474 	evcnt_detach(&sc->sc_ev_iac);
   3475 	evcnt_detach(&sc->sc_ev_icrxptc);
   3476 	evcnt_detach(&sc->sc_ev_icrxatc);
   3477 	evcnt_detach(&sc->sc_ev_ictxptc);
   3478 	evcnt_detach(&sc->sc_ev_ictxact);
   3479 	evcnt_detach(&sc->sc_ev_ictxqec);
   3480 	evcnt_detach(&sc->sc_ev_ictxqmtc);
   3481 	evcnt_detach(&sc->sc_ev_icrxdmtc);
   3482 	evcnt_detach(&sc->sc_ev_icrxoc);
   3483 	if (sc->sc_type >= WM_T_82543) {
   3484 		evcnt_detach(&sc->sc_ev_tncrs);
   3485 		evcnt_detach(&sc->sc_ev_tsctc);
   3486 		evcnt_detach(&sc->sc_ev_tsctfc);
   3487 	}
   3488 	if (sc->sc_type >= WM_T_82540) {
   3489 		evcnt_detach(&sc->sc_ev_mgtprc);
   3490 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3491 		evcnt_detach(&sc->sc_ev_mgtptc);
   3492 	}
   3493 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3494 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3495 		evcnt_detach(&sc->sc_ev_o2bspc);
   3496 		evcnt_detach(&sc->sc_ev_b2ospc);
   3497 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3498 	}
   3499 #endif /* WM_EVENT_COUNTERS */
   3500 
   3501 	rnd_detach_source(&sc->rnd_source);
   3502 
   3503 	/* Tell the firmware about the release */
   3504 	WM_CORE_LOCK(sc);
   3505 	wm_release_manageability(sc);
   3506 	wm_release_hw_control(sc);
   3507 	wm_enable_wakeup(sc);
   3508 	WM_CORE_UNLOCK(sc);
   3509 
   3510 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3511 
   3512 	ether_ifdetach(ifp);
   3513 	if_detach(ifp);
   3514 	if_percpuq_destroy(sc->sc_ipq);
   3515 
   3516 	/* Delete all remaining media. */
   3517 	ifmedia_fini(&sc->sc_mii.mii_media);
   3518 
   3519 	/* Unload RX dmamaps and free mbufs */
   3520 	for (i = 0; i < sc->sc_nqueues; i++) {
   3521 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3522 		mutex_enter(rxq->rxq_lock);
   3523 		wm_rxdrain(rxq);
   3524 		mutex_exit(rxq->rxq_lock);
   3525 	}
   3526 	/* Must unlock here */
   3527 
   3528 	/* Disestablish the interrupt handler */
   3529 	for (i = 0; i < sc->sc_nintrs; i++) {
   3530 		if (sc->sc_ihs[i] != NULL) {
   3531 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3532 			sc->sc_ihs[i] = NULL;
   3533 		}
   3534 	}
   3535 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3536 
   3537 	/* wm_stop() ensured that the workqueues are stopped. */
   3538 	workqueue_destroy(sc->sc_queue_wq);
   3539 	workqueue_destroy(sc->sc_reset_wq);
   3540 
   3541 	for (i = 0; i < sc->sc_nqueues; i++)
   3542 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3543 
   3544 	wm_free_txrx_queues(sc);
   3545 
   3546 	/* Unmap the registers */
   3547 	if (sc->sc_ss) {
   3548 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3549 		sc->sc_ss = 0;
   3550 	}
   3551 	if (sc->sc_ios) {
   3552 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3553 		sc->sc_ios = 0;
   3554 	}
   3555 	if (sc->sc_flashs) {
   3556 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3557 		sc->sc_flashs = 0;
   3558 	}
   3559 
   3560 	if (sc->sc_core_lock)
   3561 		mutex_obj_free(sc->sc_core_lock);
   3562 	if (sc->sc_ich_phymtx)
   3563 		mutex_obj_free(sc->sc_ich_phymtx);
   3564 	if (sc->sc_ich_nvmmtx)
   3565 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3566 
   3567 	return 0;
   3568 }
   3569 
   3570 static bool
   3571 wm_suspend(device_t self, const pmf_qual_t *qual)
   3572 {
   3573 	struct wm_softc *sc = device_private(self);
   3574 
   3575 	wm_release_manageability(sc);
   3576 	wm_release_hw_control(sc);
   3577 	wm_enable_wakeup(sc);
   3578 
   3579 	return true;
   3580 }
   3581 
   3582 static bool
   3583 wm_resume(device_t self, const pmf_qual_t *qual)
   3584 {
   3585 	struct wm_softc *sc = device_private(self);
   3586 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3587 	pcireg_t reg;
   3588 	char buf[256];
   3589 
   3590 	reg = CSR_READ(sc, WMREG_WUS);
   3591 	if (reg != 0) {
   3592 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3593 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3594 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3595 	}
   3596 
   3597 	if (sc->sc_type >= WM_T_PCH2)
   3598 		wm_resume_workarounds_pchlan(sc);
   3599 	IFNET_LOCK(ifp);
   3600 	if ((ifp->if_flags & IFF_UP) == 0) {
   3601 		/* >= PCH_SPT hardware workaround before reset. */
   3602 		if (sc->sc_type >= WM_T_PCH_SPT)
   3603 			wm_flush_desc_rings(sc);
   3604 
   3605 		wm_reset(sc);
   3606 		/* Non-AMT based hardware can now take control from firmware */
   3607 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3608 			wm_get_hw_control(sc);
   3609 		wm_init_manageability(sc);
   3610 	} else {
   3611 		/*
   3612 		 * We called pmf_class_network_register(), so if_init() is
   3613 		 * automatically called when IFF_UP. wm_reset(),
   3614 		 * wm_get_hw_control() and wm_init_manageability() are called
   3615 		 * via wm_init().
   3616 		 */
   3617 	}
   3618 	IFNET_UNLOCK(ifp);
   3619 
   3620 	return true;
   3621 }
   3622 
   3623 /*
   3624  * wm_watchdog:
   3625  *
   3626  *	Watchdog checker.
   3627  */
   3628 static bool
   3629 wm_watchdog(struct ifnet *ifp)
   3630 {
   3631 	int qid;
   3632 	struct wm_softc *sc = ifp->if_softc;
    3633 	uint16_t hang_queue = 0; /* The maximum queue count of wm(4) is 16 (82576). */
   3634 
   3635 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3636 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3637 
   3638 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3639 	}
   3640 
   3641 #ifdef WM_DEBUG
   3642 	if (sc->sc_trigger_reset) {
   3643 		/* debug operation, no need for atomicity or reliability */
   3644 		sc->sc_trigger_reset = 0;
   3645 		hang_queue++;
   3646 	}
   3647 #endif
   3648 
   3649 	if (hang_queue == 0)
   3650 		return true;
   3651 
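         	/* Schedule the reset work only if one is not already pending */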
   3652 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3653 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3654 
   3655 	return false;
   3656 }
   3657 
   3658 /*
   3659  * Perform an interface watchdog reset.
   3660  */
   3661 static void
   3662 wm_handle_reset_work(struct work *work, void *arg)
   3663 {
   3664 	struct wm_softc * const sc = arg;
   3665 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3666 
   3667 	/* Don't want ioctl operations to happen */
   3668 	IFNET_LOCK(ifp);
   3669 
    3670 	/* Reset the interface. */
   3671 	wm_init(ifp);
   3672 
   3673 	IFNET_UNLOCK(ifp);
   3674 
   3675 	/*
    3676 	 * Some upper-layer processing still calls ifp->if_start()
    3677 	 * directly, e.g. ALTQ or single-CPU systems.
   3678 	 */
   3679 	/* Try to get more packets going. */
   3680 	ifp->if_start(ifp);
   3681 
   3682 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3683 }
   3684 
   3685 
   3686 static void
   3687 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3688 {
   3689 
   3690 	mutex_enter(txq->txq_lock);
   3691 	if (txq->txq_sending &&
   3692 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3693 		wm_watchdog_txq_locked(ifp, txq, hang);
   3694 
   3695 	mutex_exit(txq->txq_lock);
   3696 }
   3697 
   3698 static void
   3699 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3700     uint16_t *hang)
   3701 {
   3702 	struct wm_softc *sc = ifp->if_softc;
   3703 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3704 
   3705 	KASSERT(mutex_owned(txq->txq_lock));
   3706 
   3707 	/*
   3708 	 * Since we're using delayed interrupts, sweep up
   3709 	 * before we report an error.
   3710 	 */
   3711 	wm_txeof(txq, UINT_MAX);
   3712 
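         	/* Still sending after the sweep? Mark this queue as hung. */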
   3713 	if (txq->txq_sending)
   3714 		*hang |= __BIT(wmq->wmq_id);
   3715 
   3716 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3717 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3718 		    device_xname(sc->sc_dev));
   3719 	} else {
   3720 #ifdef WM_DEBUG
   3721 		int i, j;
   3722 		struct wm_txsoft *txs;
   3723 #endif
   3724 		log(LOG_ERR,
   3725 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3726 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3727 		    txq->txq_next);
   3728 		if_statinc(ifp, if_oerrors);
   3729 #ifdef WM_DEBUG
   3730 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3731 		    i = WM_NEXTTXS(txq, i)) {
   3732 			txs = &txq->txq_soft[i];
   3733 			printf("txs %d tx %d -> %d\n",
   3734 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3735 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3736 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3737 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3738 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3739 					printf("\t %#08x%08x\n",
   3740 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3741 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3742 				} else {
   3743 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3744 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3745 					    txq->txq_descs[j].wtx_addr.wa_low);
   3746 					printf("\t %#04x%02x%02x%08x\n",
   3747 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3748 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3749 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3750 					    txq->txq_descs[j].wtx_cmdlen);
   3751 				}
   3752 				if (j == txs->txs_lastdesc)
   3753 					break;
   3754 			}
   3755 		}
   3756 #endif
   3757 	}
   3758 }
   3759 
   3760 /*
   3761  * wm_tick:
   3762  *
   3763  *	One second timer, used to check link status, sweep up
   3764  *	completed transmit jobs, etc.
   3765  */
   3766 static void
   3767 wm_tick(void *arg)
   3768 {
   3769 	struct wm_softc *sc = arg;
   3770 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    3771 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
   3772 	    cexterr;
   3773 #ifndef WM_MPSAFE
   3774 	int s = splnet();
   3775 #endif
   3776 
   3777 	WM_CORE_LOCK(sc);
   3778 
   3779 	if (sc->sc_core_stopping) {
   3780 		WM_CORE_UNLOCK(sc);
   3781 #ifndef WM_MPSAFE
   3782 		splx(s);
   3783 #endif
   3784 		return;
   3785 	}
   3786 
   3787 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3788 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3789 	mpc = CSR_READ(sc, WMREG_MPC);
   3790 	colc = CSR_READ(sc, WMREG_COLC);
   3791 	sec = CSR_READ(sc, WMREG_SEC);
   3792 	rlec = CSR_READ(sc, WMREG_RLEC);
   3793 
   3794 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3795 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3796 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3797 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3798 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3799 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3800 
   3801 	if (sc->sc_type >= WM_T_82542_2_1) {
   3802 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3803 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3804 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3805 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3806 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3807 	}
   3808 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3809 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3810 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3811 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3812 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3813 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3814 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3815 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3816 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3817 
   3818 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   3819 	    CSR_READ(sc, WMREG_GORCL) + CSR_READ(sc, WMREG_GORCH));
   3820 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   3821 	    CSR_READ(sc, WMREG_GOTCL) + CSR_READ(sc, WMREG_GOTCH));
   3822 
   3823 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3824 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3825 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3826 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3827 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3828 
   3829 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   3830 	    CSR_READ(sc, WMREG_TORL) + CSR_READ(sc, WMREG_TORH));
   3831 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   3832 	    CSR_READ(sc, WMREG_TOTL) + CSR_READ(sc, WMREG_TOTH));
   3833 
   3834 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3835 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3836 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   3837 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   3838 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3839 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3840 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3841 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3842 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3843 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3844 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   3845 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   3846 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   3847 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   3848 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   3849 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   3850 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   3851 	WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   3852 	WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   3853 	WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   3854 	WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   3855 	WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   3856 	WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
   3857 	WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
   3858 	WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   3859 
   3860 	if (sc->sc_type >= WM_T_82543) {
   3861 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3862 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3863 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3864 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3865 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3866 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3867 
   3868 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3869 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3870 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
   3871 	} else
   3872 		algnerrc = rxerrc = cexterr = 0;
   3873 
   3874 	if (sc->sc_type >= WM_T_82540) {
   3875 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3876 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3877 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3878 	}
   3879 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
   3880 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
   3881 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
   3882 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
   3883 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
   3884 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
   3885 	}
   3886 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3887 	if_statadd_ref(nsr, if_collisions, colc);
   3888 	if_statadd_ref(nsr, if_ierrors,
   3889 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   3890 	/*
    3891 	 * WMREG_RNBC is incremented when no receive buffers are available in
    3892 	 * host memory. It is not a count of dropped packets, because the
    3893 	 * controller can still receive packets in that case as long as there
    3894 	 * is space in its FIFO.
    3895 	 *
    3896 	 * To track WMREG_RNBC itself, use a dedicated EVCNT rather than
    3897 	 * if_iqdrops.
   3898 	 */
   3899 	if_statadd_ref(nsr, if_iqdrops, mpc);
   3900 	IF_STAT_PUTREF(ifp);
   3901 
   3902 	if (sc->sc_flags & WM_F_HAS_MII)
   3903 		mii_tick(&sc->sc_mii);
   3904 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3905 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3906 		wm_serdes_tick(sc);
   3907 	else
   3908 		wm_tbi_tick(sc);
   3909 
   3910 	WM_CORE_UNLOCK(sc);
   3911 #ifndef WM_MPSAFE
   3912 	splx(s);
   3913 #endif
   3914 
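         	/* Reschedule the tick only if the watchdog did not find a hung queue */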
   3915 	if (wm_watchdog(ifp))
   3916 		callout_schedule(&sc->sc_tick_ch, hz);
   3917 }
   3918 
   3919 static int
   3920 wm_ifflags_cb(struct ethercom *ec)
   3921 {
   3922 	struct ifnet *ifp = &ec->ec_if;
   3923 	struct wm_softc *sc = ifp->if_softc;
   3924 	u_short iffchange;
   3925 	int ecchange;
   3926 	bool needreset = false;
   3927 	int rc = 0;
   3928 
   3929 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3930 		device_xname(sc->sc_dev), __func__));
   3931 
   3932 	KASSERT(IFNET_LOCKED(ifp));
   3933 	WM_CORE_LOCK(sc);
   3934 
   3935 	/*
   3936 	 * Check for if_flags.
    3937 	 * The main purpose is to prevent link down when bpf is opened.
   3938 	 */
   3939 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3940 	sc->sc_if_flags = ifp->if_flags;
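         	/* Any flag change outside IFF_CANTCHANGE and IFF_DEBUG requires a reset */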
   3941 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3942 		needreset = true;
   3943 		goto ec;
   3944 	}
   3945 
   3946 	/* iff related updates */
   3947 	if ((iffchange & IFF_PROMISC) != 0)
   3948 		wm_set_filter(sc);
   3949 
   3950 	wm_set_vlan(sc);
   3951 
   3952 ec:
   3953 	/* Check for ec_capenable. */
   3954 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3955 	sc->sc_ec_capenable = ec->ec_capenable;
   3956 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3957 		needreset = true;
   3958 		goto out;
   3959 	}
   3960 
   3961 	/* ec related updates */
   3962 	wm_set_eee(sc);
   3963 
   3964 out:
   3965 	if (needreset)
   3966 		rc = ENETRESET;
   3967 	WM_CORE_UNLOCK(sc);
   3968 
   3969 	return rc;
   3970 }
   3971 
   3972 static bool
   3973 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3974 {
   3975 
   3976 	switch (sc->sc_phytype) {
   3977 	case WMPHY_82577: /* ihphy */
   3978 	case WMPHY_82578: /* atphy */
   3979 	case WMPHY_82579: /* ihphy */
   3980 	case WMPHY_I217: /* ihphy */
   3981 	case WMPHY_82580: /* ihphy */
   3982 	case WMPHY_I350: /* ihphy */
   3983 		return true;
   3984 	default:
   3985 		return false;
   3986 	}
   3987 }
   3988 
   3989 static void
   3990 wm_set_linkdown_discard(struct wm_softc *sc)
   3991 {
   3992 
   3993 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3994 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3995 
   3996 		mutex_enter(txq->txq_lock);
   3997 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3998 		mutex_exit(txq->txq_lock);
   3999 	}
   4000 }
   4001 
   4002 static void
   4003 wm_clear_linkdown_discard(struct wm_softc *sc)
   4004 {
   4005 
   4006 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4007 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4008 
   4009 		mutex_enter(txq->txq_lock);
   4010 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   4011 		mutex_exit(txq->txq_lock);
   4012 	}
   4013 }
   4014 
   4015 /*
   4016  * wm_ioctl:		[ifnet interface function]
   4017  *
   4018  *	Handle control requests from the operator.
   4019  */
   4020 static int
   4021 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4022 {
   4023 	struct wm_softc *sc = ifp->if_softc;
   4024 	struct ifreq *ifr = (struct ifreq *)data;
   4025 	struct ifaddr *ifa = (struct ifaddr *)data;
   4026 	struct sockaddr_dl *sdl;
   4027 	int error;
   4028 
   4029 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4030 		device_xname(sc->sc_dev), __func__));
   4031 
   4032 	switch (cmd) {
   4033 	case SIOCADDMULTI:
   4034 	case SIOCDELMULTI:
   4035 		break;
   4036 	default:
   4037 		KASSERT(IFNET_LOCKED(ifp));
   4038 	}
   4039 
   4040 #ifndef WM_MPSAFE
   4041 	const int s = splnet();
   4042 #endif
   4043 	switch (cmd) {
   4044 	case SIOCSIFMEDIA:
   4045 		WM_CORE_LOCK(sc);
   4046 		/* Flow control requires full-duplex mode. */
   4047 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4048 		    (ifr->ifr_media & IFM_FDX) == 0)
   4049 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4050 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4051 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4052 				/* We can do both TXPAUSE and RXPAUSE. */
   4053 				ifr->ifr_media |=
   4054 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4055 			}
   4056 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4057 		}
   4058 		WM_CORE_UNLOCK(sc);
   4059 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4060 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4061 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4062 				DPRINTF(sc, WM_DEBUG_LINK,
   4063 				    ("%s: %s: Set linkdown discard flag\n",
   4064 					device_xname(sc->sc_dev), __func__));
   4065 				wm_set_linkdown_discard(sc);
   4066 			}
   4067 		}
   4068 		break;
   4069 	case SIOCINITIFADDR:
   4070 		WM_CORE_LOCK(sc);
   4071 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4072 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4073 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4074 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4075 			/* Unicast address is the first multicast entry */
   4076 			wm_set_filter(sc);
   4077 			error = 0;
   4078 			WM_CORE_UNLOCK(sc);
   4079 			break;
   4080 		}
   4081 		WM_CORE_UNLOCK(sc);
   4082 		/*FALLTHROUGH*/
   4083 	default:
   4084 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4085 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4086 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4087 				DPRINTF(sc, WM_DEBUG_LINK,
   4088 				    ("%s: %s: Set linkdown discard flag\n",
   4089 					device_xname(sc->sc_dev), __func__));
   4090 				wm_set_linkdown_discard(sc);
   4091 			}
   4092 		}
   4093 #ifdef WM_MPSAFE
   4094 		const int s = splnet();
   4095 #endif
   4096 		/* It may call wm_start, so unlock here */
   4097 		error = ether_ioctl(ifp, cmd, data);
   4098 #ifdef WM_MPSAFE
   4099 		splx(s);
   4100 #endif
   4101 		if (error != ENETRESET)
   4102 			break;
   4103 
   4104 		error = 0;
   4105 
   4106 		if (cmd == SIOCSIFCAP)
   4107 			error = if_init(ifp);
   4108 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4109 			WM_CORE_LOCK(sc);
   4110 			if (sc->sc_if_flags & IFF_RUNNING) {
   4111 				/*
   4112 				 * Multicast list has changed; set the hardware filter
   4113 				 * accordingly.
   4114 				 */
   4115 				wm_set_filter(sc);
   4116 			}
   4117 			WM_CORE_UNLOCK(sc);
   4118 		}
   4119 		break;
   4120 	}
   4121 
   4122 #ifndef WM_MPSAFE
   4123 	splx(s);
   4124 #endif
   4125 	return error;
   4126 }
   4127 
   4128 /* MAC address related */
   4129 
   4130 /*
    4131  * Get the offset of the MAC address and return it.
    4132  * If an error occurs, use offset 0.
   4133  */
   4134 static uint16_t
   4135 wm_check_alt_mac_addr(struct wm_softc *sc)
   4136 {
   4137 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4138 	uint16_t offset = NVM_OFF_MACADDR;
   4139 
   4140 	/* Try to read alternative MAC address pointer */
   4141 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4142 		return 0;
   4143 
    4144 	/* Check whether the pointer is valid. */
   4145 	if ((offset == 0x0000) || (offset == 0xffff))
   4146 		return 0;
   4147 
   4148 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4149 	/*
    4150 	 * Check whether the alternative MAC address is valid.
    4151 	 * Some cards have a non-0xffff pointer but do not actually
    4152 	 * use an alternative MAC address.
    4153 	 *
    4154 	 * Check whether the broadcast bit is set.
   4155 	 */
   4156 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4157 		if (((myea[0] & 0xff) & 0x01) == 0)
   4158 			return offset; /* Found */
   4159 
   4160 	/* Not found */
   4161 	return 0;
   4162 }
   4163 
   4164 static int
   4165 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4166 {
   4167 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4168 	uint16_t offset = NVM_OFF_MACADDR;
   4169 	int do_invert = 0;
   4170 
   4171 	switch (sc->sc_type) {
   4172 	case WM_T_82580:
   4173 	case WM_T_I350:
   4174 	case WM_T_I354:
   4175 		/* EEPROM Top Level Partitioning */
   4176 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4177 		break;
   4178 	case WM_T_82571:
   4179 	case WM_T_82575:
   4180 	case WM_T_82576:
   4181 	case WM_T_80003:
   4182 	case WM_T_I210:
   4183 	case WM_T_I211:
   4184 		offset = wm_check_alt_mac_addr(sc);
   4185 		if (offset == 0)
   4186 			if ((sc->sc_funcid & 0x01) == 1)
   4187 				do_invert = 1;
   4188 		break;
   4189 	default:
   4190 		if ((sc->sc_funcid & 0x01) == 1)
   4191 			do_invert = 1;
   4192 		break;
   4193 	}
   4194 
   4195 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4196 		goto bad;
   4197 
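         	/* The NVM stores the address as three little-endian 16-bit words */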
   4198 	enaddr[0] = myea[0] & 0xff;
   4199 	enaddr[1] = myea[0] >> 8;
   4200 	enaddr[2] = myea[1] & 0xff;
   4201 	enaddr[3] = myea[1] >> 8;
   4202 	enaddr[4] = myea[2] & 0xff;
   4203 	enaddr[5] = myea[2] >> 8;
   4204 
   4205 	/*
   4206 	 * Toggle the LSB of the MAC address on the second port
   4207 	 * of some dual port cards.
   4208 	 */
   4209 	if (do_invert != 0)
   4210 		enaddr[5] ^= 1;
   4211 
   4212 	return 0;
   4213 
   4214  bad:
   4215 	return -1;
   4216 }
   4217 
   4218 /*
   4219  * wm_set_ral:
   4220  *
    4221  *	Set an entry in the receive address list.
   4222  */
   4223 static void
   4224 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4225 {
   4226 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4227 	uint32_t wlock_mac;
   4228 	int rv;
   4229 
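         	/*
         	 * RAL holds the low four bytes of the address and RAH the high
         	 * two; RAL_AV in RAH marks the entry valid. A NULL enaddr
         	 * clears and invalidates the slot.
         	 */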
   4230 	if (enaddr != NULL) {
   4231 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4232 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4233 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4234 		ral_hi |= RAL_AV;
   4235 	} else {
   4236 		ral_lo = 0;
   4237 		ral_hi = 0;
   4238 	}
   4239 
   4240 	switch (sc->sc_type) {
   4241 	case WM_T_82542_2_0:
   4242 	case WM_T_82542_2_1:
   4243 	case WM_T_82543:
   4244 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4245 		CSR_WRITE_FLUSH(sc);
   4246 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4247 		CSR_WRITE_FLUSH(sc);
   4248 		break;
   4249 	case WM_T_PCH2:
   4250 	case WM_T_PCH_LPT:
   4251 	case WM_T_PCH_SPT:
   4252 	case WM_T_PCH_CNP:
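         		/*
         		 * Entry 0 lives in the legacy RAL/RAH pair; the rest are
         		 * shared receive address (SHRA) registers, some of which
         		 * firmware may have locked.
         		 */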
   4253 		if (idx == 0) {
   4254 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4255 			CSR_WRITE_FLUSH(sc);
   4256 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4257 			CSR_WRITE_FLUSH(sc);
   4258 			return;
   4259 		}
   4260 		if (sc->sc_type != WM_T_PCH2) {
   4261 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4262 			    FWSM_WLOCK_MAC);
   4263 			addrl = WMREG_SHRAL(idx - 1);
   4264 			addrh = WMREG_SHRAH(idx - 1);
   4265 		} else {
   4266 			wlock_mac = 0;
   4267 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4268 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4269 		}
   4270 
   4271 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4272 			rv = wm_get_swflag_ich8lan(sc);
   4273 			if (rv != 0)
   4274 				return;
   4275 			CSR_WRITE(sc, addrl, ral_lo);
   4276 			CSR_WRITE_FLUSH(sc);
   4277 			CSR_WRITE(sc, addrh, ral_hi);
   4278 			CSR_WRITE_FLUSH(sc);
   4279 			wm_put_swflag_ich8lan(sc);
   4280 		}
   4281 
   4282 		break;
   4283 	default:
   4284 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4285 		CSR_WRITE_FLUSH(sc);
   4286 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4287 		CSR_WRITE_FLUSH(sc);
   4288 		break;
   4289 	}
   4290 }
   4291 
   4292 /*
   4293  * wm_mchash:
   4294  *
   4295  *	Compute the hash of the multicast address for the 4096-bit
   4296  *	multicast filter.
   4297  */
   4298 static uint32_t
   4299 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4300 {
   4301 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4302 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4303 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4304 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4305 	uint32_t hash;
   4306 
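         	/*
         	 * A sketch for sc_mchash_type == 0 on a non-ICH/PCH chip:
         	 *	hash = (enaddr[4] >> 4) | ((uint16_t)enaddr[5] << 4);
         	 * i.e. 12 bits formed from the upper nibble of enaddr[4] and
         	 * all of enaddr[5] select one of the 4096 filter bits.
         	 */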
   4307 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4308 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4309 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4310 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4311 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4312 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4313 		return (hash & 0x3ff);
   4314 	}
   4315 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4316 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4317 
   4318 	return (hash & 0xfff);
   4319 }
   4320 
    4321 /*
    4322  * wm_rar_count:
    4323  *	Return the number of entries in the receive address list.
    4324  */
   4325 static int
   4326 wm_rar_count(struct wm_softc *sc)
   4327 {
   4328 	int size;
   4329 
   4330 	switch (sc->sc_type) {
   4331 	case WM_T_ICH8:
    4332 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4333 		break;
   4334 	case WM_T_ICH9:
   4335 	case WM_T_ICH10:
   4336 	case WM_T_PCH:
   4337 		size = WM_RAL_TABSIZE_ICH8;
   4338 		break;
   4339 	case WM_T_PCH2:
   4340 		size = WM_RAL_TABSIZE_PCH2;
   4341 		break;
   4342 	case WM_T_PCH_LPT:
   4343 	case WM_T_PCH_SPT:
   4344 	case WM_T_PCH_CNP:
   4345 		size = WM_RAL_TABSIZE_PCH_LPT;
   4346 		break;
   4347 	case WM_T_82575:
   4348 	case WM_T_I210:
   4349 	case WM_T_I211:
   4350 		size = WM_RAL_TABSIZE_82575;
   4351 		break;
   4352 	case WM_T_82576:
   4353 	case WM_T_82580:
   4354 		size = WM_RAL_TABSIZE_82576;
   4355 		break;
   4356 	case WM_T_I350:
   4357 	case WM_T_I354:
   4358 		size = WM_RAL_TABSIZE_I350;
   4359 		break;
   4360 	default:
   4361 		size = WM_RAL_TABSIZE;
   4362 	}
   4363 
   4364 	return size;
   4365 }
   4366 
   4367 /*
   4368  * wm_set_filter:
   4369  *
   4370  *	Set up the receive filter.
   4371  */
   4372 static void
   4373 wm_set_filter(struct wm_softc *sc)
   4374 {
   4375 	struct ethercom *ec = &sc->sc_ethercom;
   4376 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4377 	struct ether_multi *enm;
   4378 	struct ether_multistep step;
   4379 	bus_addr_t mta_reg;
   4380 	uint32_t hash, reg, bit;
   4381 	int i, size, ralmax, rv;
   4382 
   4383 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4384 		device_xname(sc->sc_dev), __func__));
   4385 	KASSERT(WM_CORE_LOCKED(sc));
   4386 
   4387 	if (sc->sc_type >= WM_T_82544)
   4388 		mta_reg = WMREG_CORDOVA_MTA;
   4389 	else
   4390 		mta_reg = WMREG_MTA;
   4391 
   4392 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4393 
   4394 	if (sc->sc_if_flags & IFF_BROADCAST)
   4395 		sc->sc_rctl |= RCTL_BAM;
   4396 	if (sc->sc_if_flags & IFF_PROMISC) {
   4397 		sc->sc_rctl |= RCTL_UPE;
   4398 		ETHER_LOCK(ec);
   4399 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4400 		ETHER_UNLOCK(ec);
   4401 		goto allmulti;
   4402 	}
   4403 
   4404 	/*
   4405 	 * Set the station address in the first RAL slot, and
   4406 	 * clear the remaining slots.
   4407 	 */
   4408 	size = wm_rar_count(sc);
   4409 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4410 
   4411 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4412 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4413 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
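         		/*
         		 * FWSM_WLOCK_MAC limits how many receive address entries
         		 * software may use: 0 means all of them, 1 means RAR[0]
         		 * only, and any other value means RAR[0] plus that many
         		 * SHRA entries.
         		 */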
   4414 		switch (i) {
   4415 		case 0:
   4416 			/* We can use all entries */
   4417 			ralmax = size;
   4418 			break;
   4419 		case 1:
   4420 			/* Only RAR[0] */
   4421 			ralmax = 1;
   4422 			break;
   4423 		default:
   4424 			/* Available SHRA + RAR[0] */
   4425 			ralmax = i + 1;
   4426 		}
   4427 	} else
   4428 		ralmax = size;
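         	/*
         	 * Clear only the slots software may write; entries at or above
         	 * ralmax are owned by firmware and are left untouched.
         	 */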
   4429 	for (i = 1; i < size; i++) {
   4430 		if (i < ralmax)
   4431 			wm_set_ral(sc, NULL, i);
   4432 	}
   4433 
   4434 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4435 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4436 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4437 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4438 		size = WM_ICH8_MC_TABSIZE;
   4439 	else
   4440 		size = WM_MC_TABSIZE;
   4441 	/* Clear out the multicast table. */
   4442 	for (i = 0; i < size; i++) {
   4443 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4444 		CSR_WRITE_FLUSH(sc);
   4445 	}
   4446 
   4447 	ETHER_LOCK(ec);
   4448 	ETHER_FIRST_MULTI(step, ec, enm);
   4449 	while (enm != NULL) {
   4450 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4451 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4452 			ETHER_UNLOCK(ec);
   4453 			/*
   4454 			 * We must listen to a range of multicast addresses.
   4455 			 * For now, just accept all multicasts, rather than
   4456 			 * trying to set only those filter bits needed to match
   4457 			 * the range.  (At this time, the only use of address
   4458 			 * ranges is for IP multicast routing, for which the
   4459 			 * range is big enough to require all bits set.)
   4460 			 */
   4461 			goto allmulti;
   4462 		}
   4463 
   4464 		hash = wm_mchash(sc, enm->enm_addrlo);
   4465 
   4466 		reg = (hash >> 5);
   4467 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4468 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4469 		    || (sc->sc_type == WM_T_PCH2)
   4470 		    || (sc->sc_type == WM_T_PCH_LPT)
   4471 		    || (sc->sc_type == WM_T_PCH_SPT)
   4472 		    || (sc->sc_type == WM_T_PCH_CNP))
   4473 			reg &= 0x1f;
   4474 		else
   4475 			reg &= 0x7f;
   4476 		bit = hash & 0x1f;
   4477 
   4478 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4479 		hash |= 1U << bit;
   4480 
   4481 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4482 			/*
   4483 			 * 82544 Errata 9: Certain register cannot be written
   4484 			 * with particular alignments in PCI-X bus operation
   4485 			 * (FCAH, MTA and VFTA).
   4486 			 */
   4487 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4488 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4489 			CSR_WRITE_FLUSH(sc);
   4490 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4491 			CSR_WRITE_FLUSH(sc);
   4492 		} else {
   4493 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4494 			CSR_WRITE_FLUSH(sc);
   4495 		}
   4496 
   4497 		ETHER_NEXT_MULTI(step, enm);
   4498 	}
   4499 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4500 	ETHER_UNLOCK(ec);
   4501 
   4502 	goto setit;
   4503 
   4504  allmulti:
   4505 	sc->sc_rctl |= RCTL_MPE;
   4506 
   4507  setit:
   4508 	if (sc->sc_type >= WM_T_PCH2) {
   4509 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4510 		    && (ifp->if_mtu > ETHERMTU))
   4511 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4512 		else
   4513 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4514 		if (rv != 0)
   4515 			device_printf(sc->sc_dev,
   4516 			    "Failed to do workaround for jumbo frame.\n");
   4517 	}
   4518 
   4519 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4520 }
   4521 
   4522 /* Reset and init related */
   4523 
   4524 static void
   4525 wm_set_vlan(struct wm_softc *sc)
   4526 {
   4527 
   4528 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4529 		device_xname(sc->sc_dev), __func__));
   4530 
   4531 	/* Deal with VLAN enables. */
   4532 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4533 		sc->sc_ctrl |= CTRL_VME;
   4534 	else
   4535 		sc->sc_ctrl &= ~CTRL_VME;
   4536 
   4537 	/* Write the control registers. */
   4538 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4539 }
   4540 
   4541 static void
   4542 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4543 {
   4544 	uint32_t gcr;
   4545 	pcireg_t ctrl2;
   4546 
   4547 	gcr = CSR_READ(sc, WMREG_GCR);
   4548 
   4549 	/* Only take action if timeout value is defaulted to 0 */
   4550 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4551 		goto out;
   4552 
   4553 	if ((gcr & GCR_CAP_VER2) == 0) {
   4554 		gcr |= GCR_CMPL_TMOUT_10MS;
   4555 		goto out;
   4556 	}
   4557 
   4558 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4559 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4560 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4561 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4562 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4563 
   4564 out:
   4565 	/* Disable completion timeout resend */
   4566 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4567 
   4568 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4569 }
   4570 
   4571 void
   4572 wm_get_auto_rd_done(struct wm_softc *sc)
   4573 {
   4574 	int i;
   4575 
    4576 	/* Wait for eeprom to reload */
   4577 	switch (sc->sc_type) {
   4578 	case WM_T_82571:
   4579 	case WM_T_82572:
   4580 	case WM_T_82573:
   4581 	case WM_T_82574:
   4582 	case WM_T_82583:
   4583 	case WM_T_82575:
   4584 	case WM_T_82576:
   4585 	case WM_T_82580:
   4586 	case WM_T_I350:
   4587 	case WM_T_I354:
   4588 	case WM_T_I210:
   4589 	case WM_T_I211:
   4590 	case WM_T_80003:
   4591 	case WM_T_ICH8:
   4592 	case WM_T_ICH9:
   4593 		for (i = 0; i < 10; i++) {
   4594 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4595 				break;
   4596 			delay(1000);
   4597 		}
   4598 		if (i == 10) {
   4599 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4600 			    "complete\n", device_xname(sc->sc_dev));
   4601 		}
   4602 		break;
   4603 	default:
   4604 		break;
   4605 	}
   4606 }
   4607 
   4608 void
   4609 wm_lan_init_done(struct wm_softc *sc)
   4610 {
   4611 	uint32_t reg = 0;
   4612 	int i;
   4613 
   4614 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4615 		device_xname(sc->sc_dev), __func__));
   4616 
   4617 	/* Wait for eeprom to reload */
   4618 	switch (sc->sc_type) {
   4619 	case WM_T_ICH10:
   4620 	case WM_T_PCH:
   4621 	case WM_T_PCH2:
   4622 	case WM_T_PCH_LPT:
   4623 	case WM_T_PCH_SPT:
   4624 	case WM_T_PCH_CNP:
   4625 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4626 			reg = CSR_READ(sc, WMREG_STATUS);
   4627 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4628 				break;
   4629 			delay(100);
   4630 		}
   4631 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4632 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4633 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4634 		}
   4635 		break;
   4636 	default:
   4637 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4638 		    __func__);
   4639 		break;
   4640 	}
   4641 
   4642 	reg &= ~STATUS_LAN_INIT_DONE;
   4643 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4644 }
   4645 
   4646 void
   4647 wm_get_cfg_done(struct wm_softc *sc)
   4648 {
   4649 	int mask;
   4650 	uint32_t reg;
   4651 	int i;
   4652 
   4653 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4654 		device_xname(sc->sc_dev), __func__));
   4655 
   4656 	/* Wait for eeprom to reload */
   4657 	switch (sc->sc_type) {
   4658 	case WM_T_82542_2_0:
   4659 	case WM_T_82542_2_1:
   4660 		/* null */
   4661 		break;
   4662 	case WM_T_82543:
   4663 	case WM_T_82544:
   4664 	case WM_T_82540:
   4665 	case WM_T_82545:
   4666 	case WM_T_82545_3:
   4667 	case WM_T_82546:
   4668 	case WM_T_82546_3:
   4669 	case WM_T_82541:
   4670 	case WM_T_82541_2:
   4671 	case WM_T_82547:
   4672 	case WM_T_82547_2:
   4673 	case WM_T_82573:
   4674 	case WM_T_82574:
   4675 	case WM_T_82583:
   4676 		/* generic */
   4677 		delay(10*1000);
   4678 		break;
   4679 	case WM_T_80003:
   4680 	case WM_T_82571:
   4681 	case WM_T_82572:
   4682 	case WM_T_82575:
   4683 	case WM_T_82576:
   4684 	case WM_T_82580:
   4685 	case WM_T_I350:
   4686 	case WM_T_I354:
   4687 	case WM_T_I210:
   4688 	case WM_T_I211:
   4689 		if (sc->sc_type == WM_T_82571) {
    4690 			/* Both functions on 82571 share the port 0 bit */
   4691 			mask = EEMNGCTL_CFGDONE_0;
   4692 		} else
   4693 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4694 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4695 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4696 				break;
   4697 			delay(1000);
   4698 		}
   4699 		if (i >= WM_PHY_CFG_TIMEOUT)
   4700 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4701 				device_xname(sc->sc_dev), __func__));
   4702 		break;
   4703 	case WM_T_ICH8:
   4704 	case WM_T_ICH9:
   4705 	case WM_T_ICH10:
   4706 	case WM_T_PCH:
   4707 	case WM_T_PCH2:
   4708 	case WM_T_PCH_LPT:
   4709 	case WM_T_PCH_SPT:
   4710 	case WM_T_PCH_CNP:
   4711 		delay(10*1000);
   4712 		if (sc->sc_type >= WM_T_ICH10)
   4713 			wm_lan_init_done(sc);
   4714 		else
   4715 			wm_get_auto_rd_done(sc);
   4716 
   4717 		/* Clear PHY Reset Asserted bit */
   4718 		reg = CSR_READ(sc, WMREG_STATUS);
   4719 		if ((reg & STATUS_PHYRA) != 0)
   4720 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4721 		break;
   4722 	default:
   4723 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4724 		    __func__);
   4725 		break;
   4726 	}
   4727 }
   4728 
   4729 int
   4730 wm_phy_post_reset(struct wm_softc *sc)
   4731 {
   4732 	device_t dev = sc->sc_dev;
   4733 	uint16_t reg;
   4734 	int rv = 0;
   4735 
   4736 	/* This function is only for ICH8 and newer. */
   4737 	if (sc->sc_type < WM_T_ICH8)
   4738 		return 0;
   4739 
   4740 	if (wm_phy_resetisblocked(sc)) {
   4741 		/* XXX */
   4742 		device_printf(dev, "PHY is blocked\n");
   4743 		return -1;
   4744 	}
   4745 
   4746 	/* Allow time for h/w to get to quiescent state after reset */
   4747 	delay(10*1000);
   4748 
   4749 	/* Perform any necessary post-reset workarounds */
   4750 	if (sc->sc_type == WM_T_PCH)
   4751 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4752 	else if (sc->sc_type == WM_T_PCH2)
   4753 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4754 	if (rv != 0)
   4755 		return rv;
   4756 
   4757 	/* Clear the host wakeup bit after lcd reset */
   4758 	if (sc->sc_type >= WM_T_PCH) {
   4759 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4760 		reg &= ~BM_WUC_HOST_WU_BIT;
   4761 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4762 	}
   4763 
   4764 	/* Configure the LCD with the extended configuration region in NVM */
   4765 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4766 		return rv;
   4767 
   4768 	/* Configure the LCD with the OEM bits in NVM */
   4769 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4770 
   4771 	if (sc->sc_type == WM_T_PCH2) {
   4772 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4773 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4774 			delay(10 * 1000);
   4775 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4776 		}
   4777 		/* Set EEE LPI Update Timer to 200usec */
   4778 		rv = sc->phy.acquire(sc);
   4779 		if (rv)
   4780 			return rv;
   4781 		rv = wm_write_emi_reg_locked(dev,
   4782 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4783 		sc->phy.release(sc);
   4784 	}
   4785 
   4786 	return rv;
   4787 }
   4788 
   4789 /* Only for PCH and newer */
   4790 static int
   4791 wm_write_smbus_addr(struct wm_softc *sc)
   4792 {
   4793 	uint32_t strap, freq;
   4794 	uint16_t phy_data;
   4795 	int rv;
   4796 
   4797 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4798 		device_xname(sc->sc_dev), __func__));
   4799 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4800 
   4801 	strap = CSR_READ(sc, WMREG_STRAP);
   4802 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4803 
   4804 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4805 	if (rv != 0)
   4806 		return rv;
   4807 
   4808 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4809 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4810 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4811 
   4812 	if (sc->sc_phytype == WMPHY_I217) {
   4813 		/* Restore SMBus frequency */
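         		/*
         		 * A strap value of 0 means the frequency is unsupported;
         		 * otherwise freq - 1 encodes the low/high frequency bits.
         		 */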
    4814 		if (freq--) {
   4815 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4816 			    | HV_SMB_ADDR_FREQ_HIGH);
   4817 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4818 			    HV_SMB_ADDR_FREQ_LOW);
   4819 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4820 			    HV_SMB_ADDR_FREQ_HIGH);
   4821 		} else
   4822 			DPRINTF(sc, WM_DEBUG_INIT,
   4823 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4824 				device_xname(sc->sc_dev), __func__));
   4825 	}
   4826 
   4827 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4828 	    phy_data);
   4829 }
   4830 
   4831 static int
   4832 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4833 {
   4834 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4835 	uint16_t phy_page = 0;
   4836 	int rv = 0;
   4837 
   4838 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4839 		device_xname(sc->sc_dev), __func__));
   4840 
   4841 	switch (sc->sc_type) {
   4842 	case WM_T_ICH8:
   4843 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4844 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4845 			return 0;
   4846 
   4847 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4848 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4849 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4850 			break;
   4851 		}
   4852 		/* FALLTHROUGH */
   4853 	case WM_T_PCH:
   4854 	case WM_T_PCH2:
   4855 	case WM_T_PCH_LPT:
   4856 	case WM_T_PCH_SPT:
   4857 	case WM_T_PCH_CNP:
   4858 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4859 		break;
   4860 	default:
   4861 		return 0;
   4862 	}
   4863 
   4864 	if ((rv = sc->phy.acquire(sc)) != 0)
   4865 		return rv;
   4866 
   4867 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4868 	if ((reg & sw_cfg_mask) == 0)
   4869 		goto release;
   4870 
   4871 	/*
   4872 	 * Make sure HW does not configure LCD from PHY extended configuration
   4873 	 * before SW configuration
   4874 	 */
   4875 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4876 	if ((sc->sc_type < WM_T_PCH2)
   4877 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4878 		goto release;
   4879 
   4880 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4881 		device_xname(sc->sc_dev), __func__));
    4882 	/* The EXT_CNF_POINTER field counts DWORDs; convert to NVM words */
   4883 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4884 
   4885 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4886 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4887 	if (cnf_size == 0)
   4888 		goto release;
   4889 
   4890 	if (((sc->sc_type == WM_T_PCH)
   4891 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4892 	    || (sc->sc_type > WM_T_PCH)) {
   4893 		/*
   4894 		 * HW configures the SMBus address and LEDs when the OEM and
   4895 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4896 		 * are cleared, SW will configure them instead.
   4897 		 */
   4898 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4899 			device_xname(sc->sc_dev), __func__));
   4900 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4901 			goto release;
   4902 
   4903 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4904 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4905 		    (uint16_t)reg);
   4906 		if (rv != 0)
   4907 			goto release;
   4908 	}
   4909 
   4910 	/* Configure LCD from extended configuration region. */
   4911 	for (i = 0; i < cnf_size; i++) {
   4912 		uint16_t reg_data, reg_addr;
   4913 
   4914 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4915 			goto release;
   4916 
   4917 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4918 			goto release;
   4919 
   4920 		if (reg_addr == IGPHY_PAGE_SELECT)
   4921 			phy_page = reg_data;
   4922 
   4923 		reg_addr &= IGPHY_MAXREGADDR;
   4924 		reg_addr |= phy_page;
   4925 
   4926 		KASSERT(sc->phy.writereg_locked != NULL);
   4927 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4928 		    reg_data);
   4929 	}
   4930 
   4931 release:
   4932 	sc->phy.release(sc);
   4933 	return rv;
   4934 }
   4935 
   4936 /*
   4937  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4938  *  @sc:       pointer to the HW structure
   4939  *  @d0_state: boolean if entering d0 or d3 device state
   4940  *
   4941  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4942  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4943  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
    4944  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4945 int
   4946 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4947 {
   4948 	uint32_t mac_reg;
   4949 	uint16_t oem_reg;
   4950 	int rv;
   4951 
   4952 	if (sc->sc_type < WM_T_PCH)
   4953 		return 0;
   4954 
   4955 	rv = sc->phy.acquire(sc);
   4956 	if (rv != 0)
   4957 		return rv;
   4958 
   4959 	if (sc->sc_type == WM_T_PCH) {
   4960 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4961 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4962 			goto release;
   4963 	}
   4964 
   4965 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4966 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4967 		goto release;
   4968 
   4969 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4970 
   4971 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4972 	if (rv != 0)
   4973 		goto release;
   4974 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4975 
   4976 	if (d0_state) {
   4977 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4978 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4979 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4980 			oem_reg |= HV_OEM_BITS_LPLU;
   4981 	} else {
   4982 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4983 		    != 0)
   4984 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4985 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4986 		    != 0)
   4987 			oem_reg |= HV_OEM_BITS_LPLU;
   4988 	}
   4989 
   4990 	/* Set Restart auto-neg to activate the bits */
   4991 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4992 	    && (wm_phy_resetisblocked(sc) == false))
   4993 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4994 
   4995 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4996 
   4997 release:
   4998 	sc->phy.release(sc);
   4999 
   5000 	return rv;
   5001 }
   5002 
   5003 /* Init hardware bits */
   5004 void
   5005 wm_initialize_hardware_bits(struct wm_softc *sc)
   5006 {
   5007 	uint32_t tarc0, tarc1, reg;
   5008 
   5009 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5010 		device_xname(sc->sc_dev), __func__));
   5011 
   5012 	/* For 82571 variant, 80003 and ICHs */
   5013 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   5014 	    || (sc->sc_type >= WM_T_80003)) {
   5015 
   5016 		/* Transmit Descriptor Control 0 */
   5017 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   5018 		reg |= TXDCTL_COUNT_DESC;
   5019 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   5020 
   5021 		/* Transmit Descriptor Control 1 */
   5022 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5023 		reg |= TXDCTL_COUNT_DESC;
   5024 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5025 
   5026 		/* TARC0 */
   5027 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5028 		switch (sc->sc_type) {
   5029 		case WM_T_82571:
   5030 		case WM_T_82572:
   5031 		case WM_T_82573:
   5032 		case WM_T_82574:
   5033 		case WM_T_82583:
   5034 		case WM_T_80003:
   5035 			/* Clear bits 30..27 */
   5036 			tarc0 &= ~__BITS(30, 27);
   5037 			break;
   5038 		default:
   5039 			break;
   5040 		}
   5041 
   5042 		switch (sc->sc_type) {
   5043 		case WM_T_82571:
   5044 		case WM_T_82572:
   5045 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5046 
   5047 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5048 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5049 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5050 			/* 8257[12] Errata No.7 */
    5051 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5052 
   5053 			/* TARC1 bit 28 */
   5054 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5055 				tarc1 &= ~__BIT(28);
   5056 			else
   5057 				tarc1 |= __BIT(28);
   5058 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5059 
   5060 			/*
   5061 			 * 8257[12] Errata No.13
    5062 			 * Disable Dynamic Clock Gating.
   5063 			 */
   5064 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5065 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5066 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5067 			break;
   5068 		case WM_T_82573:
   5069 		case WM_T_82574:
   5070 		case WM_T_82583:
   5071 			if ((sc->sc_type == WM_T_82574)
   5072 			    || (sc->sc_type == WM_T_82583))
   5073 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5074 
   5075 			/* Extended Device Control */
   5076 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5077 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5078 			reg |= __BIT(22);	/* Set bit 22 */
   5079 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5080 
   5081 			/* Device Control */
   5082 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5083 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5084 
   5085 			/* PCIe Control Register */
   5086 			/*
   5087 			 * 82573 Errata (unknown).
   5088 			 *
   5089 			 * 82574 Errata 25 and 82583 Errata 12
   5090 			 * "Dropped Rx Packets":
    5091 			 *   NVM images 2.1.4 and newer do not have this bug.
   5092 			 */
   5093 			reg = CSR_READ(sc, WMREG_GCR);
   5094 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5095 			CSR_WRITE(sc, WMREG_GCR, reg);
   5096 
   5097 			if ((sc->sc_type == WM_T_82574)
   5098 			    || (sc->sc_type == WM_T_82583)) {
   5099 				/*
   5100 				 * Document says this bit must be set for
   5101 				 * proper operation.
   5102 				 */
   5103 				reg = CSR_READ(sc, WMREG_GCR);
   5104 				reg |= __BIT(22);
   5105 				CSR_WRITE(sc, WMREG_GCR, reg);
   5106 
   5107 				/*
    5108 				 * Apply a workaround for a hardware erratum
    5109 				 * documented in the errata docs. It fixes an
    5110 				 * issue where error-prone or unreliable PCIe
    5111 				 * completions occur, particularly with ASPM
    5112 				 * enabled. Without the fix, the issue can
    5113 				 * cause Tx timeouts.
   5114 				 */
   5115 				reg = CSR_READ(sc, WMREG_GCR2);
   5116 				reg |= __BIT(0);
   5117 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5118 			}
   5119 			break;
   5120 		case WM_T_80003:
   5121 			/* TARC0 */
   5122 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5123 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5124 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5125 
   5126 			/* TARC1 bit 28 */
   5127 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5128 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5129 				tarc1 &= ~__BIT(28);
   5130 			else
   5131 				tarc1 |= __BIT(28);
   5132 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5133 			break;
   5134 		case WM_T_ICH8:
   5135 		case WM_T_ICH9:
   5136 		case WM_T_ICH10:
   5137 		case WM_T_PCH:
   5138 		case WM_T_PCH2:
   5139 		case WM_T_PCH_LPT:
   5140 		case WM_T_PCH_SPT:
   5141 		case WM_T_PCH_CNP:
   5142 			/* TARC0 */
   5143 			if (sc->sc_type == WM_T_ICH8) {
   5144 				/* Set TARC0 bits 29 and 28 */
   5145 				tarc0 |= __BITS(29, 28);
   5146 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5147 				tarc0 |= __BIT(29);
   5148 				/*
    5149 				 * Drop bit 28. From Linux.
   5150 				 * See I218/I219 spec update
   5151 				 * "5. Buffer Overrun While the I219 is
   5152 				 * Processing DMA Transactions"
   5153 				 */
   5154 				tarc0 &= ~__BIT(28);
   5155 			}
   5156 			/* Set TARC0 bits 23,24,26,27 */
   5157 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5158 
   5159 			/* CTRL_EXT */
   5160 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5161 			reg |= __BIT(22);	/* Set bit 22 */
   5162 			/*
   5163 			 * Enable PHY low-power state when MAC is at D3
   5164 			 * w/o WoL
   5165 			 */
   5166 			if (sc->sc_type >= WM_T_PCH)
   5167 				reg |= CTRL_EXT_PHYPDEN;
   5168 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5169 
   5170 			/* TARC1 */
   5171 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5172 			/* bit 28 */
   5173 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5174 				tarc1 &= ~__BIT(28);
   5175 			else
   5176 				tarc1 |= __BIT(28);
   5177 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5178 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5179 
   5180 			/* Device Status */
   5181 			if (sc->sc_type == WM_T_ICH8) {
   5182 				reg = CSR_READ(sc, WMREG_STATUS);
   5183 				reg &= ~__BIT(31);
   5184 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5185 
   5186 			}
   5187 
   5188 			/* IOSFPC */
   5189 			if (sc->sc_type == WM_T_PCH_SPT) {
   5190 				reg = CSR_READ(sc, WMREG_IOSFPC);
    5191 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5192 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5193 			}
   5194 			/*
    5195 			 * To work around a descriptor data corruption issue
    5196 			 * seen with NFSv2 UDP traffic, just disable the NFS
    5197 			 * filtering capability.
   5198 			 */
   5199 			reg = CSR_READ(sc, WMREG_RFCTL);
   5200 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5201 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5202 			break;
   5203 		default:
   5204 			break;
   5205 		}
   5206 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5207 
   5208 		switch (sc->sc_type) {
   5209 		/*
   5210 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   5211 		 * Avoid RSS Hash Value bug.
   5212 		 */
   5213 		case WM_T_82571:
   5214 		case WM_T_82572:
   5215 		case WM_T_82573:
   5216 		case WM_T_80003:
   5217 		case WM_T_ICH8:
   5218 			reg = CSR_READ(sc, WMREG_RFCTL);
   5219 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   5220 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5221 			break;
   5222 		case WM_T_82574:
    5223 			/* Use extended Rx descriptors. */
   5224 			reg = CSR_READ(sc, WMREG_RFCTL);
   5225 			reg |= WMREG_RFCTL_EXSTEN;
   5226 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5227 			break;
   5228 		default:
   5229 			break;
   5230 		}
   5231 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5232 		/*
   5233 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5234 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5235 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5236 		 * Correctly by the Device"
   5237 		 *
   5238 		 * I354(C2000) Errata AVR53:
   5239 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5240 		 * Hang"
   5241 		 */
   5242 		reg = CSR_READ(sc, WMREG_RFCTL);
   5243 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5244 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5245 	}
   5246 }
   5247 
   5248 static uint32_t
   5249 wm_rxpbs_adjust_82580(uint32_t val)
   5250 {
   5251 	uint32_t rv = 0;
   5252 
   5253 	if (val < __arraycount(wm_82580_rxpbs_table))
   5254 		rv = wm_82580_rxpbs_table[val];
   5255 
   5256 	return rv;
   5257 }
   5258 
   5259 /*
   5260  * wm_reset_phy:
   5261  *
   5262  *	generic PHY reset function.
   5263  *	Same as e1000_phy_hw_reset_generic()
   5264  */
   5265 static int
   5266 wm_reset_phy(struct wm_softc *sc)
   5267 {
   5268 	uint32_t reg;
   5269 	int rv;
   5270 
   5271 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5272 		device_xname(sc->sc_dev), __func__));
   5273 	if (wm_phy_resetisblocked(sc))
   5274 		return -1;
   5275 
   5276 	rv = sc->phy.acquire(sc);
   5277 	if (rv) {
   5278 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5279 		    __func__, rv);
   5280 		return rv;
   5281 	}
   5282 
   5283 	reg = CSR_READ(sc, WMREG_CTRL);
   5284 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5285 	CSR_WRITE_FLUSH(sc);
   5286 
   5287 	delay(sc->phy.reset_delay_us);
   5288 
   5289 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5290 	CSR_WRITE_FLUSH(sc);
   5291 
   5292 	delay(150);
   5293 
   5294 	sc->phy.release(sc);
   5295 
   5296 	wm_get_cfg_done(sc);
   5297 	wm_phy_post_reset(sc);
   5298 
   5299 	return 0;
   5300 }
   5301 
   5302 /*
   5303  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5304  *
   5305  * In i219, the descriptor rings must be emptied before resetting the HW
   5306  * or before changing the device state to D3 during runtime (runtime PM).
   5307  *
   5308  * Failure to do this will cause the HW to enter a unit hang state which can
   5309  * only be released by PCI reset on the device.
   5310  *
   5311  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5312  */
   5313 static void
   5314 wm_flush_desc_rings(struct wm_softc *sc)
   5315 {
   5316 	pcireg_t preg;
   5317 	uint32_t reg;
   5318 	struct wm_txqueue *txq;
   5319 	wiseman_txdesc_t *txd;
   5320 	int nexttx;
   5321 	uint32_t rctl;
   5322 
   5323 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5324 
   5325 	/* First, disable MULR fix in FEXTNVM11 */
   5326 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5327 	reg |= FEXTNVM11_DIS_MULRFIX;
   5328 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5329 
   5330 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5331 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5332 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5333 		return;
   5334 
   5335 	/*
   5336 	 * Remove all descriptors from the tx_ring.
   5337 	 *
   5338 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5339 	 * happens when the HW reads the regs. We assign the ring itself as
    5340 	 * the data of the next descriptor. We don't care about the data since
    5341 	 * we are about to reset the HW.
   5342 	 */
   5343 #ifdef WM_DEBUG
   5344 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5345 #endif
   5346 	reg = CSR_READ(sc, WMREG_TCTL);
   5347 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5348 
   5349 	txq = &sc->sc_queue[0].wmq_txq;
   5350 	nexttx = txq->txq_next;
   5351 	txd = &txq->txq_descs[nexttx];
   5352 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
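         	/*
         	 * The low bits of wtx_cmdlen carry the buffer length; 512 is an
         	 * arbitrary nonzero length, as the data is never used.
         	 */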
   5353 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5354 	txd->wtx_fields.wtxu_status = 0;
   5355 	txd->wtx_fields.wtxu_options = 0;
   5356 	txd->wtx_fields.wtxu_vlan = 0;
   5357 
   5358 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5359 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5360 
   5361 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5362 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5363 	CSR_WRITE_FLUSH(sc);
   5364 	delay(250);
   5365 
   5366 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5367 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5368 		return;
   5369 
   5370 	/*
   5371 	 * Mark all descriptors in the RX ring as consumed and disable the
   5372 	 * rx ring.
   5373 	 */
   5374 #ifdef WM_DEBUG
   5375 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5376 #endif
   5377 	rctl = CSR_READ(sc, WMREG_RCTL);
   5378 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5379 	CSR_WRITE_FLUSH(sc);
   5380 	delay(150);
   5381 
   5382 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5383 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5384 	reg &= 0xffffc000;
   5385 	/*
   5386 	 * Update thresholds: prefetch threshold to 31, host threshold
   5387 	 * to 1 and make sure the granularity is "descriptors" and not
   5388 	 * "cache lines"
   5389 	 */
   5390 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5391 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5392 
   5393 	/* Momentarily enable the RX ring for the changes to take effect */
   5394 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5395 	CSR_WRITE_FLUSH(sc);
   5396 	delay(150);
   5397 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5398 }
   5399 
   5400 /*
   5401  * wm_reset:
   5402  *
   5403  *	Reset the i82542 chip.
   5404  */
   5405 static void
   5406 wm_reset(struct wm_softc *sc)
   5407 {
   5408 	int phy_reset = 0;
   5409 	int i, error = 0;
   5410 	uint32_t reg;
   5411 	uint16_t kmreg;
   5412 	int rv;
   5413 
   5414 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5415 		device_xname(sc->sc_dev), __func__));
   5416 	KASSERT(sc->sc_type != 0);
   5417 
   5418 	/*
   5419 	 * Allocate on-chip memory according to the MTU size.
   5420 	 * The Packet Buffer Allocation register must be written
   5421 	 * before the chip is reset.
   5422 	 */
   5423 	switch (sc->sc_type) {
   5424 	case WM_T_82547:
   5425 	case WM_T_82547_2:
   5426 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5427 		    PBA_22K : PBA_30K;
   5428 		for (i = 0; i < sc->sc_nqueues; i++) {
   5429 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5430 			txq->txq_fifo_head = 0;
   5431 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5432 			txq->txq_fifo_size =
   5433 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5434 			txq->txq_fifo_stall = 0;
   5435 		}
   5436 		break;
   5437 	case WM_T_82571:
   5438 	case WM_T_82572:
    5439 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5440 	case WM_T_80003:
   5441 		sc->sc_pba = PBA_32K;
   5442 		break;
   5443 	case WM_T_82573:
   5444 		sc->sc_pba = PBA_12K;
   5445 		break;
   5446 	case WM_T_82574:
   5447 	case WM_T_82583:
   5448 		sc->sc_pba = PBA_20K;
   5449 		break;
   5450 	case WM_T_82576:
   5451 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5452 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5453 		break;
   5454 	case WM_T_82580:
   5455 	case WM_T_I350:
   5456 	case WM_T_I354:
   5457 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5458 		break;
   5459 	case WM_T_I210:
   5460 	case WM_T_I211:
   5461 		sc->sc_pba = PBA_34K;
   5462 		break;
   5463 	case WM_T_ICH8:
   5464 		/* Workaround for a bit corruption issue in FIFO memory */
   5465 		sc->sc_pba = PBA_8K;
   5466 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5467 		break;
   5468 	case WM_T_ICH9:
   5469 	case WM_T_ICH10:
   5470 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5471 		    PBA_14K : PBA_10K;
   5472 		break;
   5473 	case WM_T_PCH:
   5474 	case WM_T_PCH2:	/* XXX 14K? */
   5475 	case WM_T_PCH_LPT:
   5476 	case WM_T_PCH_SPT:
   5477 	case WM_T_PCH_CNP:
   5478 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5479 		    PBA_12K : PBA_26K;
   5480 		break;
   5481 	default:
   5482 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5483 		    PBA_40K : PBA_48K;
   5484 		break;
   5485 	}
   5486 	/*
    5487 	 * Only old or non-multiqueue devices have the PBA register.
   5488 	 * XXX Need special handling for 82575.
   5489 	 */
   5490 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5491 	    || (sc->sc_type == WM_T_82575))
   5492 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5493 
   5494 	/* Prevent the PCI-E bus from sticking */
   5495 	if (sc->sc_flags & WM_F_PCIE) {
   5496 		int timeout = 800;
   5497 
   5498 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5499 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5500 
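         		/*
         		 * Poll for up to 800 * 100us = 80ms for outstanding
         		 * PCIe master requests to drain.
         		 */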
   5501 		while (timeout--) {
   5502 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5503 			    == 0)
   5504 				break;
   5505 			delay(100);
   5506 		}
   5507 		if (timeout == 0)
   5508 			device_printf(sc->sc_dev,
   5509 			    "failed to disable bus mastering\n");
   5510 	}
   5511 
   5512 	/* Set the completion timeout for interface */
   5513 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5514 	    || (sc->sc_type == WM_T_82580)
   5515 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5516 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5517 		wm_set_pcie_completion_timeout(sc);
   5518 
   5519 	/* Clear interrupt */
   5520 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5521 	if (wm_is_using_msix(sc)) {
   5522 		if (sc->sc_type != WM_T_82574) {
   5523 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5524 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5525 		} else
   5526 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5527 	}
   5528 
   5529 	/* Stop the transmit and receive processes. */
   5530 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5531 	sc->sc_rctl &= ~RCTL_EN;
   5532 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5533 	CSR_WRITE_FLUSH(sc);
   5534 
   5535 	/* XXX set_tbi_sbp_82543() */
   5536 
   5537 	delay(10*1000);
   5538 
   5539 	/* Must acquire the MDIO ownership before MAC reset */
   5540 	switch (sc->sc_type) {
   5541 	case WM_T_82573:
   5542 	case WM_T_82574:
   5543 	case WM_T_82583:
   5544 		error = wm_get_hw_semaphore_82573(sc);
   5545 		break;
   5546 	default:
   5547 		break;
   5548 	}
   5549 
   5550 	/*
   5551 	 * 82541 Errata 29? & 82547 Errata 28?
   5552 	 * See also the description about PHY_RST bit in CTRL register
   5553 	 * in 8254x_GBe_SDM.pdf.
   5554 	 */
   5555 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5556 		CSR_WRITE(sc, WMREG_CTRL,
   5557 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5558 		CSR_WRITE_FLUSH(sc);
   5559 		delay(5000);
   5560 	}
   5561 
   5562 	switch (sc->sc_type) {
   5563 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5564 	case WM_T_82541:
   5565 	case WM_T_82541_2:
   5566 	case WM_T_82547:
   5567 	case WM_T_82547_2:
   5568 		/*
   5569 		 * On some chipsets, a reset through a memory-mapped write
   5570 		 * cycle can cause the chip to reset before completing the
   5571 		 * write cycle. This causes major headache that can be avoided
   5572 		 * by issuing the reset via indirect register writes through
   5573 		 * I/O space.
   5574 		 *
   5575 		 * So, if we successfully mapped the I/O BAR at attach time,
   5576 		 * use that. Otherwise, try our luck with a memory-mapped
   5577 		 * reset.
   5578 		 */
   5579 		if (sc->sc_flags & WM_F_IOH_VALID)
   5580 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5581 		else
   5582 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5583 		break;
   5584 	case WM_T_82545_3:
   5585 	case WM_T_82546_3:
   5586 		/* Use the shadow control register on these chips. */
   5587 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5588 		break;
   5589 	case WM_T_80003:
   5590 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5591 		if (sc->phy.acquire(sc) != 0)
   5592 			break;
   5593 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5594 		sc->phy.release(sc);
   5595 		break;
   5596 	case WM_T_ICH8:
   5597 	case WM_T_ICH9:
   5598 	case WM_T_ICH10:
   5599 	case WM_T_PCH:
   5600 	case WM_T_PCH2:
   5601 	case WM_T_PCH_LPT:
   5602 	case WM_T_PCH_SPT:
   5603 	case WM_T_PCH_CNP:
   5604 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5605 		if (wm_phy_resetisblocked(sc) == false) {
   5606 			/*
   5607 			 * Gate automatic PHY configuration by hardware on
   5608 			 * non-managed 82579
   5609 			 */
   5610 			if ((sc->sc_type == WM_T_PCH2)
   5611 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5612 				== 0))
   5613 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5614 
   5615 			reg |= CTRL_PHY_RESET;
   5616 			phy_reset = 1;
   5617 		} else
   5618 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5619 		if (sc->phy.acquire(sc) != 0)
   5620 			break;
   5621 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5622 		/* Don't insert a completion barrier during reset */
   5623 		delay(20*1000);
   5624 		mutex_exit(sc->sc_ich_phymtx);
   5625 		break;
   5626 	case WM_T_82580:
   5627 	case WM_T_I350:
   5628 	case WM_T_I354:
   5629 	case WM_T_I210:
   5630 	case WM_T_I211:
   5631 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5632 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5633 			CSR_WRITE_FLUSH(sc);
   5634 		delay(5000);
   5635 		break;
   5636 	case WM_T_82542_2_0:
   5637 	case WM_T_82542_2_1:
   5638 	case WM_T_82543:
   5639 	case WM_T_82540:
   5640 	case WM_T_82545:
   5641 	case WM_T_82546:
   5642 	case WM_T_82571:
   5643 	case WM_T_82572:
   5644 	case WM_T_82573:
   5645 	case WM_T_82574:
   5646 	case WM_T_82575:
   5647 	case WM_T_82576:
   5648 	case WM_T_82583:
   5649 	default:
   5650 		/* Everything else can safely use the documented method. */
   5651 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5652 		break;
   5653 	}
   5654 
   5655 	/* Must release the MDIO ownership after MAC reset */
   5656 	switch (sc->sc_type) {
   5657 	case WM_T_82573:
   5658 	case WM_T_82574:
   5659 	case WM_T_82583:
   5660 		if (error == 0)
   5661 			wm_put_hw_semaphore_82573(sc);
   5662 		break;
   5663 	default:
   5664 		break;
   5665 	}
   5666 
   5667 	/* Set Phy Config Counter to 50msec */
   5668 	if (sc->sc_type == WM_T_PCH2) {
   5669 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5670 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5671 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5672 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5673 	}
   5674 
   5675 	if (phy_reset != 0)
   5676 		wm_get_cfg_done(sc);
   5677 
   5678 	/* Reload EEPROM */
   5679 	switch (sc->sc_type) {
   5680 	case WM_T_82542_2_0:
   5681 	case WM_T_82542_2_1:
   5682 	case WM_T_82543:
   5683 	case WM_T_82544:
   5684 		delay(10);
   5685 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5686 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5687 		CSR_WRITE_FLUSH(sc);
   5688 		delay(2000);
   5689 		break;
   5690 	case WM_T_82540:
   5691 	case WM_T_82545:
   5692 	case WM_T_82545_3:
   5693 	case WM_T_82546:
   5694 	case WM_T_82546_3:
   5695 		delay(5*1000);
   5696 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5697 		break;
   5698 	case WM_T_82541:
   5699 	case WM_T_82541_2:
   5700 	case WM_T_82547:
   5701 	case WM_T_82547_2:
   5702 		delay(20000);
   5703 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5704 		break;
   5705 	case WM_T_82571:
   5706 	case WM_T_82572:
   5707 	case WM_T_82573:
   5708 	case WM_T_82574:
   5709 	case WM_T_82583:
   5710 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5711 			delay(10);
   5712 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5713 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5714 			CSR_WRITE_FLUSH(sc);
   5715 		}
   5716 		/* check EECD_EE_AUTORD */
   5717 		wm_get_auto_rd_done(sc);
   5718 		/*
   5719 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5720 		 * is set.
   5721 		 */
   5722 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5723 		    || (sc->sc_type == WM_T_82583))
   5724 			delay(25*1000);
   5725 		break;
   5726 	case WM_T_82575:
   5727 	case WM_T_82576:
   5728 	case WM_T_82580:
   5729 	case WM_T_I350:
   5730 	case WM_T_I354:
   5731 	case WM_T_I210:
   5732 	case WM_T_I211:
   5733 	case WM_T_80003:
   5734 		/* check EECD_EE_AUTORD */
   5735 		wm_get_auto_rd_done(sc);
   5736 		break;
   5737 	case WM_T_ICH8:
   5738 	case WM_T_ICH9:
   5739 	case WM_T_ICH10:
   5740 	case WM_T_PCH:
   5741 	case WM_T_PCH2:
   5742 	case WM_T_PCH_LPT:
   5743 	case WM_T_PCH_SPT:
   5744 	case WM_T_PCH_CNP:
   5745 		break;
   5746 	default:
   5747 		panic("%s: unknown type\n", __func__);
   5748 	}
   5749 
   5750 	/* Check whether EEPROM is present or not */
   5751 	switch (sc->sc_type) {
   5752 	case WM_T_82575:
   5753 	case WM_T_82576:
   5754 	case WM_T_82580:
   5755 	case WM_T_I350:
   5756 	case WM_T_I354:
   5757 	case WM_T_ICH8:
   5758 	case WM_T_ICH9:
   5759 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5760 			/* Not found */
   5761 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5762 			if (sc->sc_type == WM_T_82575)
   5763 				wm_reset_init_script_82575(sc);
   5764 		}
   5765 		break;
   5766 	default:
   5767 		break;
   5768 	}
   5769 
   5770 	if (phy_reset != 0)
   5771 		wm_phy_post_reset(sc);
   5772 
   5773 	if ((sc->sc_type == WM_T_82580)
   5774 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5775 		/* Clear global device reset status bit */
   5776 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5777 	}
   5778 
   5779 	/* Clear any pending interrupt events. */
   5780 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5781 	reg = CSR_READ(sc, WMREG_ICR);
   5782 	if (wm_is_using_msix(sc)) {
   5783 		if (sc->sc_type != WM_T_82574) {
   5784 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5785 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5786 		} else
   5787 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5788 	}
   5789 
   5790 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5791 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5792 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5793 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5794 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5795 		reg |= KABGTXD_BGSQLBIAS;
   5796 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5797 	}
   5798 
   5799 	/* Reload sc_ctrl */
   5800 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5801 
   5802 	wm_set_eee(sc);
   5803 
   5804 	/*
   5805 	 * For PCH, this write will make sure that any noise will be detected
   5806 	 * as a CRC error and be dropped rather than show up as a bad packet
   5807 	 * to the DMA engine
   5808 	 */
   5809 	if (sc->sc_type == WM_T_PCH)
   5810 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5811 
   5812 	if (sc->sc_type >= WM_T_82544)
   5813 		CSR_WRITE(sc, WMREG_WUC, 0);
   5814 
   5815 	if (sc->sc_type < WM_T_82575)
   5816 		wm_disable_aspm(sc); /* Workaround for some chips */
   5817 
   5818 	wm_reset_mdicnfg_82580(sc);
   5819 
   5820 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5821 		wm_pll_workaround_i210(sc);
   5822 
   5823 	if (sc->sc_type == WM_T_80003) {
   5824 		/* Default to TRUE to enable the MDIC W/A */
   5825 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5826 
   5827 		rv = wm_kmrn_readreg(sc,
   5828 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5829 		if (rv == 0) {
   5830 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5831 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5832 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5833 			else
   5834 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5835 		}
   5836 	}
   5837 }
   5838 
   5839 /*
   5840  * wm_add_rxbuf:
   5841  *
    5842  *	Add a receive buffer to the indicated descriptor.
   5843  */
   5844 static int
   5845 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5846 {
   5847 	struct wm_softc *sc = rxq->rxq_sc;
   5848 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5849 	struct mbuf *m;
   5850 	int error;
   5851 
   5852 	KASSERT(mutex_owned(rxq->rxq_lock));
   5853 
   5854 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5855 	if (m == NULL)
   5856 		return ENOBUFS;
   5857 
   5858 	MCLGET(m, M_DONTWAIT);
   5859 	if ((m->m_flags & M_EXT) == 0) {
   5860 		m_freem(m);
   5861 		return ENOBUFS;
   5862 	}
   5863 
   5864 	if (rxs->rxs_mbuf != NULL)
   5865 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5866 
   5867 	rxs->rxs_mbuf = m;
   5868 
   5869 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5870 	/*
   5871 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5872 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5873 	 */
   5874 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5875 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5876 	if (error) {
   5877 		/* XXX XXX XXX */
   5878 		aprint_error_dev(sc->sc_dev,
   5879 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5880 		panic("wm_add_rxbuf");
   5881 	}
   5882 
   5883 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5884 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5885 
   5886 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5887 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5888 			wm_init_rxdesc(rxq, idx);
   5889 	} else
   5890 		wm_init_rxdesc(rxq, idx);
   5891 
   5892 	return 0;
   5893 }
   5894 
   5895 /*
   5896  * wm_rxdrain:
   5897  *
   5898  *	Drain the receive queue.
   5899  */
   5900 static void
   5901 wm_rxdrain(struct wm_rxqueue *rxq)
   5902 {
   5903 	struct wm_softc *sc = rxq->rxq_sc;
   5904 	struct wm_rxsoft *rxs;
   5905 	int i;
   5906 
   5907 	KASSERT(mutex_owned(rxq->rxq_lock));
   5908 
   5909 	for (i = 0; i < WM_NRXDESC; i++) {
   5910 		rxs = &rxq->rxq_soft[i];
   5911 		if (rxs->rxs_mbuf != NULL) {
   5912 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5913 			m_freem(rxs->rxs_mbuf);
   5914 			rxs->rxs_mbuf = NULL;
   5915 		}
   5916 	}
   5917 }
   5918 
   5919 /*
   5920  * Setup registers for RSS.
   5921  *
    5922  * XXX no VMDq support yet
   5923  */
   5924 static void
   5925 wm_init_rss(struct wm_softc *sc)
   5926 {
   5927 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5928 	int i;
   5929 
   5930 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5931 
   5932 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5933 		unsigned int qid, reta_ent;
   5934 
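         		/* Distribute RETA entries across queues round-robin */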
   5935 		qid  = i % sc->sc_nqueues;
   5936 		switch (sc->sc_type) {
   5937 		case WM_T_82574:
   5938 			reta_ent = __SHIFTIN(qid,
   5939 			    RETA_ENT_QINDEX_MASK_82574);
   5940 			break;
   5941 		case WM_T_82575:
   5942 			reta_ent = __SHIFTIN(qid,
   5943 			    RETA_ENT_QINDEX1_MASK_82575);
   5944 			break;
   5945 		default:
   5946 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5947 			break;
   5948 		}
   5949 
   5950 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5951 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5952 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5953 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5954 	}
   5955 
   5956 	rss_getkey((uint8_t *)rss_key);
   5957 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5958 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5959 
   5960 	if (sc->sc_type == WM_T_82574)
   5961 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5962 	else
   5963 		mrqc = MRQC_ENABLE_RSS_MQ;
   5964 
   5965 	/*
   5966 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5967 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5968 	 */
   5969 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5970 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5971 #if 0
   5972 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5973 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5974 #endif
   5975 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5976 
   5977 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5978 }
   5979 
   5980 /*
    5981  * Adjust the TX and RX queue numbers which the system actually uses.
    5982  *
    5983  * The numbers are affected by the following parameters:
    5984  *     - The number of hardware queues
   5985  *     - The number of MSI-X vectors (= "nvectors" argument)
   5986  *     - ncpu
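          *
          * Example (illustrative): an 82576 (16 hardware queues) given five
          * MSI-X vectors on a 4-CPU system ends up with
          * sc_nqueues = min(16, 5 - 1, 4) = 4.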
   5987  */
   5988 static void
   5989 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5990 {
   5991 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5992 
   5993 	if (nvectors < 2) {
   5994 		sc->sc_nqueues = 1;
   5995 		return;
   5996 	}
   5997 
   5998 	switch (sc->sc_type) {
   5999 	case WM_T_82572:
   6000 		hw_ntxqueues = 2;
   6001 		hw_nrxqueues = 2;
   6002 		break;
   6003 	case WM_T_82574:
   6004 		hw_ntxqueues = 2;
   6005 		hw_nrxqueues = 2;
   6006 		break;
   6007 	case WM_T_82575:
   6008 		hw_ntxqueues = 4;
   6009 		hw_nrxqueues = 4;
   6010 		break;
   6011 	case WM_T_82576:
   6012 		hw_ntxqueues = 16;
   6013 		hw_nrxqueues = 16;
   6014 		break;
   6015 	case WM_T_82580:
   6016 	case WM_T_I350:
   6017 	case WM_T_I354:
   6018 		hw_ntxqueues = 8;
   6019 		hw_nrxqueues = 8;
   6020 		break;
   6021 	case WM_T_I210:
   6022 		hw_ntxqueues = 4;
   6023 		hw_nrxqueues = 4;
   6024 		break;
   6025 	case WM_T_I211:
   6026 		hw_ntxqueues = 2;
   6027 		hw_nrxqueues = 2;
   6028 		break;
   6029 		/*
    6030 		 * The following Ethernet controllers do not support MSI-X;
   6031 		 * this driver doesn't let them use multiqueue.
   6032 		 *     - WM_T_80003
   6033 		 *     - WM_T_ICH8
   6034 		 *     - WM_T_ICH9
   6035 		 *     - WM_T_ICH10
   6036 		 *     - WM_T_PCH
   6037 		 *     - WM_T_PCH2
   6038 		 *     - WM_T_PCH_LPT
   6039 		 */
   6040 	default:
   6041 		hw_ntxqueues = 1;
   6042 		hw_nrxqueues = 1;
   6043 		break;
   6044 	}
   6045 
   6046 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6047 
   6048 	/*
    6049 	 * Using more queues than MSI-X vectors cannot improve scaling, so
    6050 	 * we limit the number of queues actually used.
   6051 	 */
   6052 	if (nvectors < hw_nqueues + 1)
   6053 		sc->sc_nqueues = nvectors - 1;
   6054 	else
   6055 		sc->sc_nqueues = hw_nqueues;
   6056 
   6057 	/*
    6058 	 * Using more queues than CPUs cannot improve scaling, so we
    6059 	 * limit the number of queues actually used.
   6060 	 */
   6061 	if (ncpu < sc->sc_nqueues)
   6062 		sc->sc_nqueues = ncpu;
   6063 }
   6064 
   6065 static inline bool
   6066 wm_is_using_msix(struct wm_softc *sc)
   6067 {
   6068 
   6069 	return (sc->sc_nintrs > 1);
   6070 }
   6071 
   6072 static inline bool
   6073 wm_is_using_multiqueue(struct wm_softc *sc)
   6074 {
   6075 
   6076 	return (sc->sc_nqueues > 1);
   6077 }
   6078 
   6079 static int
   6080 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6081 {
   6082 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6083 
   6084 	wmq->wmq_id = qidx;
   6085 	wmq->wmq_intr_idx = intr_idx;
   6086 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   6087 	    wm_handle_queue, wmq);
   6088 	if (wmq->wmq_si != NULL)
   6089 		return 0;
   6090 
   6091 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6092 	    wmq->wmq_id);
   6093 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6094 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6095 	return ENOMEM;
   6096 }
   6097 
   6098 /*
    6099  * Both single-interrupt MSI and INTx can use this function.
   6100  */
   6101 static int
   6102 wm_setup_legacy(struct wm_softc *sc)
   6103 {
   6104 	pci_chipset_tag_t pc = sc->sc_pc;
   6105 	const char *intrstr = NULL;
   6106 	char intrbuf[PCI_INTRSTR_LEN];
   6107 	int error;
   6108 
   6109 	error = wm_alloc_txrx_queues(sc);
   6110 	if (error) {
   6111 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6112 		    error);
   6113 		return ENOMEM;
   6114 	}
   6115 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6116 	    sizeof(intrbuf));
   6117 #ifdef WM_MPSAFE
   6118 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6119 #endif
   6120 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6121 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6122 	if (sc->sc_ihs[0] == NULL) {
    6123 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   6124 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6125 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6126 		return ENOMEM;
   6127 	}
   6128 
   6129 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6130 	sc->sc_nintrs = 1;
   6131 
   6132 	return wm_softint_establish_queue(sc, 0, 0);
   6133 }
   6134 
   6135 static int
   6136 wm_setup_msix(struct wm_softc *sc)
   6137 {
   6138 	void *vih;
   6139 	kcpuset_t *affinity;
   6140 	int qidx, error, intr_idx, txrx_established;
   6141 	pci_chipset_tag_t pc = sc->sc_pc;
   6142 	const char *intrstr = NULL;
   6143 	char intrbuf[PCI_INTRSTR_LEN];
   6144 	char intr_xname[INTRDEVNAMEBUF];
   6145 
   6146 	if (sc->sc_nqueues < ncpu) {
   6147 		/*
   6148 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    6149 		 * interrupts starts from CPU#1.
   6150 		 */
   6151 		sc->sc_affinity_offset = 1;
   6152 	} else {
   6153 		/*
    6154 		 * In this case, this device uses all CPUs, so for readability
    6155 		 * we map the affinity cpu_index to the MSI-X vector number.
   6156 		 */
   6157 		sc->sc_affinity_offset = 0;
   6158 	}
   6159 
   6160 	error = wm_alloc_txrx_queues(sc);
   6161 	if (error) {
   6162 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6163 		    error);
   6164 		return ENOMEM;
   6165 	}
   6166 
   6167 	kcpuset_create(&affinity, false);
   6168 	intr_idx = 0;
   6169 
   6170 	/*
   6171 	 * TX and RX
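         	 *
         	 * Example: with four Tx/Rx queues on an 8-CPU system,
         	 * sc_affinity_offset is 1, so the round-robin below binds
         	 * the TXRX0..TXRX3 interrupts to CPU#1..CPU#4.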
   6172 	 */
   6173 	txrx_established = 0;
   6174 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6175 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6176 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6177 
   6178 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6179 		    sizeof(intrbuf));
   6180 #ifdef WM_MPSAFE
   6181 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6182 		    PCI_INTR_MPSAFE, true);
   6183 #endif
   6184 		memset(intr_xname, 0, sizeof(intr_xname));
   6185 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6186 		    device_xname(sc->sc_dev), qidx);
   6187 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6188 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6189 		if (vih == NULL) {
   6190 			aprint_error_dev(sc->sc_dev,
   6191 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6192 			    intrstr ? " at " : "",
   6193 			    intrstr ? intrstr : "");
   6194 
   6195 			goto fail;
   6196 		}
   6197 		kcpuset_zero(affinity);
   6198 		/* Round-robin affinity */
   6199 		kcpuset_set(affinity, affinity_to);
   6200 		error = interrupt_distribute(vih, affinity, NULL);
   6201 		if (error == 0) {
   6202 			aprint_normal_dev(sc->sc_dev,
   6203 			    "for TX and RX interrupting at %s affinity to %u\n",
   6204 			    intrstr, affinity_to);
   6205 		} else {
   6206 			aprint_normal_dev(sc->sc_dev,
   6207 			    "for TX and RX interrupting at %s\n", intrstr);
   6208 		}
   6209 		sc->sc_ihs[intr_idx] = vih;
   6210 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6211 			goto fail;
   6212 		txrx_established++;
   6213 		intr_idx++;
   6214 	}
   6215 
   6216 	/* LINK */
   6217 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6218 	    sizeof(intrbuf));
   6219 #ifdef WM_MPSAFE
   6220 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6221 #endif
   6222 	memset(intr_xname, 0, sizeof(intr_xname));
   6223 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6224 	    device_xname(sc->sc_dev));
   6225 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6226 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6227 	if (vih == NULL) {
   6228 		aprint_error_dev(sc->sc_dev,
   6229 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6230 		    intrstr ? " at " : "",
   6231 		    intrstr ? intrstr : "");
   6232 
   6233 		goto fail;
   6234 	}
   6235 	/* Keep default affinity to LINK interrupt */
   6236 	aprint_normal_dev(sc->sc_dev,
   6237 	    "for LINK interrupting at %s\n", intrstr);
   6238 	sc->sc_ihs[intr_idx] = vih;
   6239 	sc->sc_link_intr_idx = intr_idx;
   6240 
   6241 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6242 	kcpuset_destroy(affinity);
   6243 	return 0;
   6244 
   6245  fail:
   6246 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6247 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6248 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6249 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6250 	}
   6251 
   6252 	kcpuset_destroy(affinity);
   6253 	return ENOMEM;
   6254 }
   6255 
   6256 static void
   6257 wm_unset_stopping_flags(struct wm_softc *sc)
   6258 {
   6259 	int i;
   6260 
   6261 	KASSERT(WM_CORE_LOCKED(sc));
   6262 
   6263 	/* Must unset stopping flags in ascending order. */
   6264 	for (i = 0; i < sc->sc_nqueues; i++) {
   6265 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6266 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6267 
   6268 		mutex_enter(txq->txq_lock);
   6269 		txq->txq_stopping = false;
   6270 		mutex_exit(txq->txq_lock);
   6271 
   6272 		mutex_enter(rxq->rxq_lock);
   6273 		rxq->rxq_stopping = false;
   6274 		mutex_exit(rxq->rxq_lock);
   6275 	}
   6276 
   6277 	sc->sc_core_stopping = false;
   6278 }
   6279 
   6280 static void
   6281 wm_set_stopping_flags(struct wm_softc *sc)
   6282 {
   6283 	int i;
   6284 
   6285 	KASSERT(WM_CORE_LOCKED(sc));
   6286 
   6287 	sc->sc_core_stopping = true;
   6288 
   6289 	/* Must set stopping flags in ascending order. */
   6290 	for (i = 0; i < sc->sc_nqueues; i++) {
   6291 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6292 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6293 
   6294 		mutex_enter(rxq->rxq_lock);
   6295 		rxq->rxq_stopping = true;
   6296 		mutex_exit(rxq->rxq_lock);
   6297 
   6298 		mutex_enter(txq->txq_lock);
   6299 		txq->txq_stopping = true;
   6300 		mutex_exit(txq->txq_lock);
   6301 	}
   6302 }
   6303 
   6304 /*
    6305  * Write the interrupt interval value to the ITR or EITR register.
   6306  */
   6307 static void
   6308 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6309 {
   6310 
   6311 	if (!wmq->wmq_set_itr)
   6312 		return;
   6313 
   6314 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6315 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6316 
   6317 		/*
    6318 		 * The 82575 doesn't have the CNT_INGR field, so the counter
    6319 		 * field must be overwritten by software.
   6320 		 */
   6321 		if (sc->sc_type == WM_T_82575)
   6322 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   6323 		else
   6324 			eitr |= EITR_CNT_INGR;
   6325 
   6326 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6327 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6328 		/*
    6329 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6330 		 * the multiqueue function with MSI-X.
   6331 		 */
   6332 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6333 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6334 	} else {
   6335 		KASSERT(wmq->wmq_id == 0);
   6336 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6337 	}
   6338 
   6339 	wmq->wmq_set_itr = false;
   6340 }
   6341 
   6342 /*
   6343  * TODO
    6344  * The dynamic ITR calculation below is almost the same as Linux igb's;
    6345  * however, it does not fit wm(4), so AIM is disabled until we find an
    6346  * appropriate ITR calculation.
   6347  */
   6348 /*
    6349  * Calculate the interrupt interval value to be written to the register
    6350  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   6351  */
   6352 static void
   6353 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6354 {
   6355 #ifdef NOTYET
   6356 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6357 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6358 	uint32_t avg_size = 0;
   6359 	uint32_t new_itr;
   6360 
   6361 	if (rxq->rxq_packets)
   6362 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6363 	if (txq->txq_packets)
   6364 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6365 
   6366 	if (avg_size == 0) {
   6367 		new_itr = 450; /* restore default value */
   6368 		goto out;
   6369 	}
   6370 
    6371 	/* Add 24 bytes: 4 (CRC) + 8 (preamble/SFD) + 12 (inter-frame gap) */
   6372 	avg_size += 24;
   6373 
   6374 	/* Don't starve jumbo frames */
   6375 	avg_size = uimin(avg_size, 3000);
   6376 
   6377 	/* Give a little boost to mid-size frames */
   6378 	if ((avg_size > 300) && (avg_size < 1200))
   6379 		new_itr = avg_size / 3;
   6380 	else
   6381 		new_itr = avg_size / 2;
   6382 
   6383 out:
   6384 	/*
    6385 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   6386 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6387 	 */
   6388 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6389 		new_itr *= 4;
   6390 
   6391 	if (new_itr != wmq->wmq_itr) {
   6392 		wmq->wmq_itr = new_itr;
   6393 		wmq->wmq_set_itr = true;
   6394 	} else
   6395 		wmq->wmq_set_itr = false;
   6396 
   6397 	rxq->rxq_packets = 0;
   6398 	rxq->rxq_bytes = 0;
   6399 	txq->txq_packets = 0;
   6400 	txq->txq_bytes = 0;
   6401 #endif
   6402 }
   6403 
   6404 static void
   6405 wm_init_sysctls(struct wm_softc *sc)
   6406 {
   6407 	struct sysctllog **log;
   6408 	const struct sysctlnode *rnode, *qnode, *cnode;
   6409 	int i, rv;
   6410 	const char *dvname;
   6411 
   6412 	log = &sc->sc_sysctllog;
   6413 	dvname = device_xname(sc->sc_dev);
   6414 
   6415 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6416 	    0, CTLTYPE_NODE, dvname,
   6417 	    SYSCTL_DESCR("wm information and settings"),
   6418 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6419 	if (rv != 0)
   6420 		goto err;
   6421 
   6422 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6423 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   6424 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6425 	if (rv != 0)
   6426 		goto teardown;
   6427 
   6428 	for (i = 0; i < sc->sc_nqueues; i++) {
   6429 		struct wm_queue *wmq = &sc->sc_queue[i];
   6430 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6431 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6432 
   6433 		snprintf(sc->sc_queue[i].sysctlname,
   6434 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6435 
   6436 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6437 		    0, CTLTYPE_NODE,
   6438 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6439 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6440 			break;
   6441 
   6442 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6443 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6444 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6445 		    NULL, 0, &txq->txq_free,
   6446 		    0, CTL_CREATE, CTL_EOL) != 0)
   6447 			break;
   6448 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6449 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6450 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6451 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6452 		    0, CTL_CREATE, CTL_EOL) != 0)
   6453 			break;
   6454 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6455 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6456 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6457 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6458 		    0, CTL_CREATE, CTL_EOL) != 0)
   6459 			break;
   6460 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6461 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6462 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6463 		    NULL, 0, &txq->txq_next,
   6464 		    0, CTL_CREATE, CTL_EOL) != 0)
   6465 			break;
   6466 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6467 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6468 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6469 		    NULL, 0, &txq->txq_sfree,
   6470 		    0, CTL_CREATE, CTL_EOL) != 0)
   6471 			break;
   6472 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6473 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6474 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6475 		    NULL, 0, &txq->txq_snext,
   6476 		    0, CTL_CREATE, CTL_EOL) != 0)
   6477 			break;
   6478 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6479 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6480 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6481 		    NULL, 0, &txq->txq_sdirty,
   6482 		    0, CTL_CREATE, CTL_EOL) != 0)
   6483 			break;
   6484 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6485 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6486 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6487 		    NULL, 0, &txq->txq_flags,
   6488 		    0, CTL_CREATE, CTL_EOL) != 0)
   6489 			break;
   6490 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6491 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6492 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6493 		    NULL, 0, &txq->txq_stopping,
   6494 		    0, CTL_CREATE, CTL_EOL) != 0)
   6495 			break;
   6496 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6497 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6498 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6499 		    NULL, 0, &txq->txq_sending,
   6500 		    0, CTL_CREATE, CTL_EOL) != 0)
   6501 			break;
   6502 
   6503 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6504 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6505 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6506 		    NULL, 0, &rxq->rxq_ptr,
   6507 		    0, CTL_CREATE, CTL_EOL) != 0)
   6508 			break;
   6509 	}
   6510 
   6511 #ifdef WM_DEBUG
   6512 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6513 	    CTLTYPE_INT, "debug_flags",
   6514 	    SYSCTL_DESCR(
   6515 		    "Debug flags:\n"	\
   6516 		    "\t0x01 LINK\n"	\
   6517 		    "\t0x02 TX\n"	\
   6518 		    "\t0x04 RX\n"	\
   6519 		    "\t0x08 GMII\n"	\
   6520 		    "\t0x10 MANAGE\n"	\
   6521 		    "\t0x20 NVM\n"	\
   6522 		    "\t0x40 INIT\n"	\
   6523 		    "\t0x80 LOCK"),
   6524 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6525 	if (rv != 0)
   6526 		goto teardown;
   6527 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6528 	    CTLTYPE_BOOL, "trigger_reset",
   6529 	    SYSCTL_DESCR("Trigger an interface reset"),
   6530 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6531 	if (rv != 0)
   6532 		goto teardown;
   6533 #endif
   6534 
   6535 	return;
   6536 
   6537 teardown:
   6538 	sysctl_teardown(log);
   6539 err:
   6540 	sc->sc_sysctllog = NULL;
   6541 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6542 	    __func__, rv);
   6543 }
   6544 
   6545 /*
   6546  * wm_init:		[ifnet interface function]
   6547  *
   6548  *	Initialize the interface.
   6549  */
   6550 static int
   6551 wm_init(struct ifnet *ifp)
   6552 {
   6553 	struct wm_softc *sc = ifp->if_softc;
   6554 	int ret;
   6555 
   6556 	KASSERT(IFNET_LOCKED(ifp));
   6557 
   6558 	if (sc->sc_dying)
   6559 		return ENXIO;
   6560 
   6561 	WM_CORE_LOCK(sc);
   6562 	ret = wm_init_locked(ifp);
   6563 	WM_CORE_UNLOCK(sc);
   6564 
   6565 	return ret;
   6566 }
   6567 
   6568 static int
   6569 wm_init_locked(struct ifnet *ifp)
   6570 {
   6571 	struct wm_softc *sc = ifp->if_softc;
   6572 	struct ethercom *ec = &sc->sc_ethercom;
   6573 	int i, j, trynum, error = 0;
   6574 	uint32_t reg, sfp_mask = 0;
   6575 
   6576 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6577 		device_xname(sc->sc_dev), __func__));
   6578 	KASSERT(IFNET_LOCKED(ifp));
   6579 	KASSERT(WM_CORE_LOCKED(sc));
   6580 
   6581 	/*
    6582 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6583 	 * There is a small but measurable benefit to avoiding the adjustment
   6584 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6585 	 * on such platforms.  One possibility is that the DMA itself is
   6586 	 * slightly more efficient if the front of the entire packet (instead
   6587 	 * of the front of the headers) is aligned.
   6588 	 *
   6589 	 * Note we must always set align_tweak to 0 if we are using
   6590 	 * jumbo frames.
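         	 *
         	 * (The 2-byte tweak shifts the 14-byte Ethernet header so that
         	 * the IP header that follows lands on a 4-byte boundary.)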
   6591 	 */
   6592 #ifdef __NO_STRICT_ALIGNMENT
   6593 	sc->sc_align_tweak = 0;
   6594 #else
   6595 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6596 		sc->sc_align_tweak = 0;
   6597 	else
   6598 		sc->sc_align_tweak = 2;
   6599 #endif /* __NO_STRICT_ALIGNMENT */
   6600 
   6601 	/* Cancel any pending I/O. */
   6602 	wm_stop_locked(ifp, false, false);
   6603 
   6604 	/* Update statistics before reset */
   6605 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6606 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6607 
   6608 	/* >= PCH_SPT hardware workaround before reset. */
   6609 	if (sc->sc_type >= WM_T_PCH_SPT)
   6610 		wm_flush_desc_rings(sc);
   6611 
   6612 	/* Reset the chip to a known state. */
   6613 	wm_reset(sc);
   6614 
   6615 	/*
   6616 	 * AMT based hardware can now take control from firmware
   6617 	 * Do this after reset.
   6618 	 */
   6619 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6620 		wm_get_hw_control(sc);
   6621 
   6622 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6623 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6624 		wm_legacy_irq_quirk_spt(sc);
   6625 
   6626 	/* Init hardware bits */
   6627 	wm_initialize_hardware_bits(sc);
   6628 
   6629 	/* Reset the PHY. */
   6630 	if (sc->sc_flags & WM_F_HAS_MII)
   6631 		wm_gmii_reset(sc);
   6632 
   6633 	if (sc->sc_type >= WM_T_ICH8) {
   6634 		reg = CSR_READ(sc, WMREG_GCR);
   6635 		/*
    6636 		 * The ICH8 No-snoop bits have the opposite polarity.
    6637 		 * Set to snoop by default after reset.
   6638 		 */
   6639 		if (sc->sc_type == WM_T_ICH8)
   6640 			reg |= GCR_NO_SNOOP_ALL;
   6641 		else
   6642 			reg &= ~GCR_NO_SNOOP_ALL;
   6643 		CSR_WRITE(sc, WMREG_GCR, reg);
   6644 	}
   6645 
   6646 	if ((sc->sc_type >= WM_T_ICH8)
   6647 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6648 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6649 
   6650 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6651 		reg |= CTRL_EXT_RO_DIS;
   6652 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6653 	}
   6654 
   6655 	/* Calculate (E)ITR value */
   6656 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6657 		/*
    6658 		 * For NEWQUEUE's EITR (except for the 82575).
    6659 		 * The 82575's EITR should be set to the same throttling value
    6660 		 * as the other old controllers' ITR because the interrupt/sec
    6661 		 * calculation is the same: 1,000,000,000 / (N * 256).
    6662 		 *
    6663 		 * The 82574's EITR should be set to the same value as its ITR.
    6664 		 *
    6665 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    6666 		 * in contrast to the ITR throttling value.
   6667 		 */
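         		/* 450 corresponds to 1,000,000 / 450 ~= 2222 interrupts/sec. */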
   6668 		sc->sc_itr_init = 450;
   6669 	} else if (sc->sc_type >= WM_T_82543) {
   6670 		/*
   6671 		 * Set up the interrupt throttling register (units of 256ns)
   6672 		 * Note that a footnote in Intel's documentation says this
   6673 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6674 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6675 		 * that that is also true for the 1024ns units of the other
   6676 		 * interrupt-related timer registers -- so, really, we ought
   6677 		 * to divide this value by 4 when the link speed is low.
   6678 		 *
   6679 		 * XXX implement this division at link speed change!
   6680 		 */
   6681 
   6682 		/*
   6683 		 * For N interrupts/sec, set this value to:
   6684 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6685 		 * absolute and packet timer values to this value
   6686 		 * divided by 4 to get "simple timer" behavior.
   6687 		 */
   6688 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6689 	}
   6690 
   6691 	error = wm_init_txrx_queues(sc);
   6692 	if (error)
   6693 		goto out;
   6694 
   6695 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6696 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6697 	    (sc->sc_type >= WM_T_82575))
   6698 		wm_serdes_power_up_link_82575(sc);
   6699 
   6700 	/* Clear out the VLAN table -- we don't use it (yet). */
   6701 	CSR_WRITE(sc, WMREG_VET, 0);
   6702 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6703 		trynum = 10; /* Due to hw errata */
   6704 	else
   6705 		trynum = 1;
   6706 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6707 		for (j = 0; j < trynum; j++)
   6708 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6709 
   6710 	/*
   6711 	 * Set up flow-control parameters.
   6712 	 *
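         	 * FCAL/FCAH/FCT hold the constants of the 802.3x PAUSE frame:
         	 * destination MAC 01:80:c2:00:00:01 and EtherType 0x8808
         	 * (ETHERTYPE_FLOWCONTROL).
         	 *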
   6713 	 * XXX Values could probably stand some tuning.
   6714 	 */
   6715 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6716 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6717 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6718 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6719 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6720 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6721 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6722 	}
   6723 
   6724 	sc->sc_fcrtl = FCRTL_DFLT;
   6725 	if (sc->sc_type < WM_T_82543) {
   6726 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6727 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6728 	} else {
   6729 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6730 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6731 	}
   6732 
   6733 	if (sc->sc_type == WM_T_80003)
   6734 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6735 	else
   6736 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6737 
   6738 	/* Writes the control register. */
   6739 	wm_set_vlan(sc);
   6740 
   6741 	if (sc->sc_flags & WM_F_HAS_MII) {
   6742 		uint16_t kmreg;
   6743 
   6744 		switch (sc->sc_type) {
   6745 		case WM_T_80003:
   6746 		case WM_T_ICH8:
   6747 		case WM_T_ICH9:
   6748 		case WM_T_ICH10:
   6749 		case WM_T_PCH:
   6750 		case WM_T_PCH2:
   6751 		case WM_T_PCH_LPT:
   6752 		case WM_T_PCH_SPT:
   6753 		case WM_T_PCH_CNP:
   6754 			/*
    6755 			 * Set the MAC to wait the maximum time between each
    6756 			 * iteration and increase the maximum iterations when
    6757 			 * polling the PHY; this fixes erroneous timeouts at
   6758 			 * 10Mbps.
   6759 			 */
   6760 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6761 			    0xFFFF);
   6762 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6763 			    &kmreg);
   6764 			kmreg |= 0x3F;
   6765 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6766 			    kmreg);
   6767 			break;
   6768 		default:
   6769 			break;
   6770 		}
   6771 
   6772 		if (sc->sc_type == WM_T_80003) {
   6773 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6774 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6775 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6776 
   6777 			/* Bypass RX and TX FIFOs */
   6778 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6779 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6780 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6781 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6782 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6783 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6784 		}
   6785 	}
   6786 #if 0
   6787 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6788 #endif
   6789 
   6790 	/* Set up checksum offload parameters. */
   6791 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6792 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6793 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6794 		reg |= RXCSUM_IPOFL;
   6795 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6796 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6797 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6798 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6799 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6800 
   6801 	/* Set registers about MSI-X */
   6802 	if (wm_is_using_msix(sc)) {
   6803 		uint32_t ivar, qintr_idx;
   6804 		struct wm_queue *wmq;
   6805 		unsigned int qid;
   6806 
   6807 		if (sc->sc_type == WM_T_82575) {
   6808 			/* Interrupt control */
   6809 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6810 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6811 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6812 
   6813 			/* TX and RX */
   6814 			for (i = 0; i < sc->sc_nqueues; i++) {
   6815 				wmq = &sc->sc_queue[i];
   6816 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6817 				    EITR_TX_QUEUE(wmq->wmq_id)
   6818 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6819 			}
   6820 			/* Link status */
   6821 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6822 			    EITR_OTHER);
   6823 		} else if (sc->sc_type == WM_T_82574) {
   6824 			/* Interrupt control */
   6825 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6826 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6827 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6828 
   6829 			/*
   6830 			 * Work around issue with spurious interrupts
   6831 			 * in MSI-X mode.
    6832 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6833 			 * been initialized, so re-initialize WMREG_RFCTL here.
   6834 			 */
   6835 			reg = CSR_READ(sc, WMREG_RFCTL);
   6836 			reg |= WMREG_RFCTL_ACKDIS;
   6837 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6838 
   6839 			ivar = 0;
   6840 			/* TX and RX */
   6841 			for (i = 0; i < sc->sc_nqueues; i++) {
   6842 				wmq = &sc->sc_queue[i];
   6843 				qid = wmq->wmq_id;
   6844 				qintr_idx = wmq->wmq_intr_idx;
   6845 
   6846 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6847 				    IVAR_TX_MASK_Q_82574(qid));
   6848 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6849 				    IVAR_RX_MASK_Q_82574(qid));
   6850 			}
   6851 			/* Link status */
   6852 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6853 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6854 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6855 		} else {
   6856 			/* Interrupt control */
   6857 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6858 			    | GPIE_EIAME | GPIE_PBA);
   6859 
   6860 			switch (sc->sc_type) {
   6861 			case WM_T_82580:
   6862 			case WM_T_I350:
   6863 			case WM_T_I354:
   6864 			case WM_T_I210:
   6865 			case WM_T_I211:
   6866 				/* TX and RX */
   6867 				for (i = 0; i < sc->sc_nqueues; i++) {
   6868 					wmq = &sc->sc_queue[i];
   6869 					qid = wmq->wmq_id;
   6870 					qintr_idx = wmq->wmq_intr_idx;
   6871 
   6872 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6873 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6874 					ivar |= __SHIFTIN((qintr_idx
   6875 						| IVAR_VALID),
   6876 					    IVAR_TX_MASK_Q(qid));
   6877 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6878 					ivar |= __SHIFTIN((qintr_idx
   6879 						| IVAR_VALID),
   6880 					    IVAR_RX_MASK_Q(qid));
   6881 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6882 				}
   6883 				break;
   6884 			case WM_T_82576:
   6885 				/* TX and RX */
   6886 				for (i = 0; i < sc->sc_nqueues; i++) {
   6887 					wmq = &sc->sc_queue[i];
   6888 					qid = wmq->wmq_id;
   6889 					qintr_idx = wmq->wmq_intr_idx;
   6890 
   6891 					ivar = CSR_READ(sc,
   6892 					    WMREG_IVAR_Q_82576(qid));
   6893 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6894 					ivar |= __SHIFTIN((qintr_idx
   6895 						| IVAR_VALID),
   6896 					    IVAR_TX_MASK_Q_82576(qid));
   6897 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6898 					ivar |= __SHIFTIN((qintr_idx
   6899 						| IVAR_VALID),
   6900 					    IVAR_RX_MASK_Q_82576(qid));
   6901 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6902 					    ivar);
   6903 				}
   6904 				break;
   6905 			default:
   6906 				break;
   6907 			}
   6908 
   6909 			/* Link status */
   6910 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6911 			    IVAR_MISC_OTHER);
   6912 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6913 		}
   6914 
   6915 		if (wm_is_using_multiqueue(sc)) {
   6916 			wm_init_rss(sc);
   6917 
   6918 			/*
    6919 			 * NOTE: Receive Full-Packet Checksum Offload is
    6920 			 * mutually exclusive with Multiqueue. However, this
    6921 			 * is not the same as the TCP/IP checksums, which
    6922 			 * still work.
    6923 			 */
   6924 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6925 			reg |= RXCSUM_PCSD;
   6926 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6927 		}
   6928 	}
   6929 
   6930 	/* Set up the interrupt registers. */
   6931 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6932 
   6933 	/* Enable SFP module insertion interrupt if it's required */
   6934 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6935 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6936 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6937 		sfp_mask = ICR_GPI(0);
   6938 	}
   6939 
   6940 	if (wm_is_using_msix(sc)) {
   6941 		uint32_t mask;
   6942 		struct wm_queue *wmq;
   6943 
   6944 		switch (sc->sc_type) {
   6945 		case WM_T_82574:
   6946 			mask = 0;
   6947 			for (i = 0; i < sc->sc_nqueues; i++) {
   6948 				wmq = &sc->sc_queue[i];
   6949 				mask |= ICR_TXQ(wmq->wmq_id);
   6950 				mask |= ICR_RXQ(wmq->wmq_id);
   6951 			}
   6952 			mask |= ICR_OTHER;
   6953 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6954 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6955 			break;
   6956 		default:
   6957 			if (sc->sc_type == WM_T_82575) {
   6958 				mask = 0;
   6959 				for (i = 0; i < sc->sc_nqueues; i++) {
   6960 					wmq = &sc->sc_queue[i];
   6961 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6962 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6963 				}
   6964 				mask |= EITR_OTHER;
   6965 			} else {
   6966 				mask = 0;
   6967 				for (i = 0; i < sc->sc_nqueues; i++) {
   6968 					wmq = &sc->sc_queue[i];
   6969 					mask |= 1 << wmq->wmq_intr_idx;
   6970 				}
   6971 				mask |= 1 << sc->sc_link_intr_idx;
   6972 			}
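         			/*
         			 * EIAC selects the vectors that auto-clear their
         			 * cause bits, EIAM the vectors that auto-mask, and
         			 * EIMS enables the vectors in the mask.
         			 */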
   6973 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6974 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6975 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6976 
   6977 			/* For other interrupts */
   6978 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6979 			break;
   6980 		}
   6981 	} else {
   6982 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6983 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6984 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6985 	}
   6986 
   6987 	/* Set up the inter-packet gap. */
   6988 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6989 
   6990 	if (sc->sc_type >= WM_T_82543) {
   6991 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6992 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6993 			wm_itrs_writereg(sc, wmq);
   6994 		}
   6995 		/*
    6996 		 * Link interrupts occur much less frequently than
    6997 		 * TX and RX interrupts, so we don't tune the
    6998 		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's
    6999 		 * if_igb does.
   7000 		 */
   7001 	}
   7002 
   7003 	/* Set the VLAN EtherType. */
   7004 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   7005 
   7006 	/*
   7007 	 * Set up the transmit control register; we start out with
   7008 	 * a collision distance suitable for FDX, but update it when
   7009 	 * we resolve the media type.
   7010 	 */
   7011 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   7012 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   7013 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7014 	if (sc->sc_type >= WM_T_82571)
   7015 		sc->sc_tctl |= TCTL_MULR;
   7016 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7017 
   7018 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7019 		/* Write TDT after TCTL.EN is set. See the documentation. */
   7020 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7021 	}
   7022 
   7023 	if (sc->sc_type == WM_T_80003) {
   7024 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7025 		reg &= ~TCTL_EXT_GCEX_MASK;
   7026 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7027 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7028 	}
   7029 
   7030 	/* Set the media. */
   7031 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7032 		goto out;
   7033 
   7034 	/* Configure for OS presence */
   7035 	wm_init_manageability(sc);
   7036 
   7037 	/*
   7038 	 * Set up the receive control register; we actually program the
   7039 	 * register when we set the receive filter. Use multicast address
   7040 	 * offset type 0.
   7041 	 *
   7042 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7043 	 * don't enable that feature.
   7044 	 */
   7045 	sc->sc_mchash_type = 0;
   7046 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7047 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7048 
    7049 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   7050 	if (sc->sc_type == WM_T_82574)
   7051 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7052 
   7053 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7054 		sc->sc_rctl |= RCTL_SECRC;
   7055 
   7056 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7057 	    && (ifp->if_mtu > ETHERMTU)) {
   7058 		sc->sc_rctl |= RCTL_LPE;
   7059 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7060 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7061 	}
   7062 
   7063 	if (MCLBYTES == 2048)
   7064 		sc->sc_rctl |= RCTL_2k;
   7065 	else {
   7066 		if (sc->sc_type >= WM_T_82543) {
   7067 			switch (MCLBYTES) {
   7068 			case 4096:
   7069 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7070 				break;
   7071 			case 8192:
   7072 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7073 				break;
   7074 			case 16384:
   7075 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7076 				break;
   7077 			default:
   7078 				panic("wm_init: MCLBYTES %d unsupported",
   7079 				    MCLBYTES);
   7080 				break;
   7081 			}
   7082 		} else
   7083 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7084 	}
   7085 
   7086 	/* Enable ECC */
   7087 	switch (sc->sc_type) {
   7088 	case WM_T_82571:
   7089 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7090 		reg |= PBA_ECC_CORR_EN;
   7091 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7092 		break;
   7093 	case WM_T_PCH_LPT:
   7094 	case WM_T_PCH_SPT:
   7095 	case WM_T_PCH_CNP:
   7096 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7097 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7098 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7099 
   7100 		sc->sc_ctrl |= CTRL_MEHE;
   7101 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7102 		break;
   7103 	default:
   7104 		break;
   7105 	}
   7106 
   7107 	/*
   7108 	 * Set the receive filter.
   7109 	 *
   7110 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7111 	 * the setting of RCTL.EN in wm_set_filter()
   7112 	 */
   7113 	wm_set_filter(sc);
   7114 
    7115 	/* On 82575 and later, set RDT only if RX is enabled. */
   7116 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7117 		int qidx;
   7118 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7119 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7120 			for (i = 0; i < WM_NRXDESC; i++) {
   7121 				mutex_enter(rxq->rxq_lock);
   7122 				wm_init_rxdesc(rxq, i);
   7123 				mutex_exit(rxq->rxq_lock);
   7124 
   7125 			}
   7126 		}
   7127 	}
   7128 
   7129 	wm_unset_stopping_flags(sc);
   7130 
   7131 	/* Start the one second link check clock. */
   7132 	callout_schedule(&sc->sc_tick_ch, hz);
   7133 
   7134 	/*
   7135 	 * ...all done! (IFNET_LOCKED asserted above.)
   7136 	 */
   7137 	ifp->if_flags |= IFF_RUNNING;
   7138 
   7139  out:
   7140 	/* Save last flags for the callback */
   7141 	sc->sc_if_flags = ifp->if_flags;
   7142 	sc->sc_ec_capenable = ec->ec_capenable;
   7143 	if (error)
   7144 		log(LOG_ERR, "%s: interface not running\n",
   7145 		    device_xname(sc->sc_dev));
   7146 	return error;
   7147 }
   7148 
   7149 /*
   7150  * wm_stop:		[ifnet interface function]
   7151  *
   7152  *	Stop transmission on the interface.
   7153  */
   7154 static void
   7155 wm_stop(struct ifnet *ifp, int disable)
   7156 {
   7157 	struct wm_softc *sc = ifp->if_softc;
   7158 
   7159 	ASSERT_SLEEPABLE();
   7160 	KASSERT(IFNET_LOCKED(ifp));
   7161 
   7162 	WM_CORE_LOCK(sc);
    7163 	wm_stop_locked(ifp, disable != 0, true);
   7164 	WM_CORE_UNLOCK(sc);
   7165 
   7166 	/*
   7167 	 * After wm_set_stopping_flags(), it is guaranteed that
   7168 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7169 	 * However, workqueue_wait() cannot be called in
    7170 	 * wm_stop_locked() because it can sleep, so call
    7171 	 * workqueue_wait() here.
   7172 	 */
   7173 	for (int i = 0; i < sc->sc_nqueues; i++)
   7174 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7175 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7176 }
   7177 
   7178 static void
   7179 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7180 {
   7181 	struct wm_softc *sc = ifp->if_softc;
   7182 	struct wm_txsoft *txs;
   7183 	int i, qidx;
   7184 
   7185 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7186 		device_xname(sc->sc_dev), __func__));
   7187 	KASSERT(IFNET_LOCKED(ifp));
   7188 	KASSERT(WM_CORE_LOCKED(sc));
   7189 
   7190 	wm_set_stopping_flags(sc);
   7191 
   7192 	if (sc->sc_flags & WM_F_HAS_MII) {
   7193 		/* Down the MII. */
   7194 		mii_down(&sc->sc_mii);
   7195 	} else {
   7196 #if 0
   7197 		/* Should we clear PHY's status properly? */
   7198 		wm_reset(sc);
   7199 #endif
   7200 	}
   7201 
   7202 	/* Stop the transmit and receive processes. */
   7203 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7204 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7205 	sc->sc_rctl &= ~RCTL_EN;
   7206 
   7207 	/*
   7208 	 * Clear the interrupt mask to ensure the device cannot assert its
   7209 	 * interrupt line.
   7210 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7211 	 * service any currently pending or shared interrupt.
   7212 	 */
   7213 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7214 	sc->sc_icr = 0;
   7215 	if (wm_is_using_msix(sc)) {
   7216 		if (sc->sc_type != WM_T_82574) {
   7217 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7218 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7219 		} else
   7220 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7221 	}
   7222 
   7223 	/*
   7224 	 * Stop callouts after interrupts are disabled; if we have
   7225 	 * to wait for them, we will be releasing the CORE_LOCK
   7226 	 * briefly, which will unblock interrupts on the current CPU.
   7227 	 */
   7228 
   7229 	/* Stop the one second clock. */
   7230 	if (wait)
   7231 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7232 	else
   7233 		callout_stop(&sc->sc_tick_ch);
   7234 
   7235 	/* Stop the 82547 Tx FIFO stall check timer. */
   7236 	if (sc->sc_type == WM_T_82547) {
   7237 		if (wait)
   7238 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7239 		else
   7240 			callout_stop(&sc->sc_txfifo_ch);
   7241 	}
   7242 
   7243 	/* Release any queued transmit buffers. */
   7244 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7245 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7246 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7247 		struct mbuf *m;
   7248 
   7249 		mutex_enter(txq->txq_lock);
   7250 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7251 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7252 			txs = &txq->txq_soft[i];
   7253 			if (txs->txs_mbuf != NULL) {
   7254 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   7255 				m_freem(txs->txs_mbuf);
   7256 				txs->txs_mbuf = NULL;
   7257 			}
   7258 		}
   7259 		/* Drain txq_interq */
   7260 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7261 			m_freem(m);
   7262 		mutex_exit(txq->txq_lock);
   7263 	}
   7264 
   7265 	/* Mark the interface as down and cancel the watchdog timer. */
   7266 	ifp->if_flags &= ~IFF_RUNNING;
   7267 	sc->sc_if_flags = ifp->if_flags;
   7268 
   7269 	if (disable) {
   7270 		for (i = 0; i < sc->sc_nqueues; i++) {
   7271 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7272 			mutex_enter(rxq->rxq_lock);
   7273 			wm_rxdrain(rxq);
   7274 			mutex_exit(rxq->rxq_lock);
   7275 		}
   7276 	}
   7277 
   7278 #if 0 /* notyet */
   7279 	if (sc->sc_type >= WM_T_82544)
   7280 		CSR_WRITE(sc, WMREG_WUC, 0);
   7281 #endif
   7282 }
   7283 
   7284 static void
   7285 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7286 {
   7287 	struct mbuf *m;
   7288 	int i;
   7289 
   7290 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7291 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7292 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7293 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7294 		    m->m_data, m->m_len, m->m_flags);
   7295 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7296 	    i, i == 1 ? "" : "s");
   7297 }
   7298 
   7299 /*
   7300  * wm_82547_txfifo_stall:
   7301  *
   7302  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7303  *	reset the FIFO pointers, and restart packet transmission.
   7304  */
   7305 static void
   7306 wm_82547_txfifo_stall(void *arg)
   7307 {
   7308 	struct wm_softc *sc = arg;
   7309 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7310 
   7311 	mutex_enter(txq->txq_lock);
   7312 
   7313 	if (txq->txq_stopping)
   7314 		goto out;
   7315 
   7316 	if (txq->txq_fifo_stall) {
   7317 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7318 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7319 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7320 			/*
   7321 			 * Packets have drained.  Stop transmitter, reset
   7322 			 * FIFO pointers, restart transmitter, and kick
   7323 			 * the packet queue.
   7324 			 */
   7325 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7326 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7327 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7328 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7329 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7330 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7331 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7332 			CSR_WRITE_FLUSH(sc);
   7333 
   7334 			txq->txq_fifo_head = 0;
   7335 			txq->txq_fifo_stall = 0;
   7336 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7337 		} else {
   7338 			/*
   7339 			 * Still waiting for packets to drain; try again in
   7340 			 * another tick.
   7341 			 */
   7342 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7343 		}
   7344 	}
   7345 
   7346 out:
   7347 	mutex_exit(txq->txq_lock);
   7348 }
   7349 
   7350 /*
   7351  * wm_82547_txfifo_bugchk:
   7352  *
   7353  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7354  *	prevent enqueueing a packet that would wrap around the end
    7355  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7356  *
   7357  *	We do this by checking the amount of space before the end
   7358  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7359  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7360  *	the internal FIFO pointers to the beginning, and restart
   7361  *	transmission on the interface.
   7362  */
   7363 #define	WM_FIFO_HDR		0x10
   7364 #define	WM_82547_PAD_LEN	0x3e0
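         
         /*
          * Example (illustrative): a full-sized 1514-byte frame consumes
          * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600 bytes of
          * FIFO space; transmission stalls only when
          * len >= WM_82547_PAD_LEN + space.
          */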
   7365 static int
   7366 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7367 {
   7368 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7369 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7370 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7371 
   7372 	/* Just return if already stalled. */
   7373 	if (txq->txq_fifo_stall)
   7374 		return 1;
   7375 
   7376 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7377 		/* Stall only occurs in half-duplex mode. */
   7378 		goto send_packet;
   7379 	}
   7380 
   7381 	if (len >= WM_82547_PAD_LEN + space) {
   7382 		txq->txq_fifo_stall = 1;
   7383 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7384 		return 1;
   7385 	}
   7386 
   7387  send_packet:
   7388 	txq->txq_fifo_head += len;
   7389 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7390 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7391 
   7392 	return 0;
   7393 }
   7394 
   7395 static int
   7396 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7397 {
   7398 	int error;
   7399 
   7400 	/*
   7401 	 * Allocate the control data structures, and create and load the
   7402 	 * DMA map for it.
   7403 	 *
   7404 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7405 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7406 	 * both sets within the same 4G segment.
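         	 * (The constraint is enforced via the 0x100000000 boundary
         	 * argument passed to bus_dmamem_alloc() below.)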
   7407 	 */
   7408 	if (sc->sc_type < WM_T_82544)
   7409 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7410 	else
   7411 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7412 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7413 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7414 	else
   7415 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7416 
   7417 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7418 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7419 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7420 		aprint_error_dev(sc->sc_dev,
   7421 		    "unable to allocate TX control data, error = %d\n",
   7422 		    error);
   7423 		goto fail_0;
   7424 	}
   7425 
   7426 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7427 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7428 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7429 		aprint_error_dev(sc->sc_dev,
   7430 		    "unable to map TX control data, error = %d\n", error);
   7431 		goto fail_1;
   7432 	}
   7433 
   7434 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7435 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7436 		aprint_error_dev(sc->sc_dev,
   7437 		    "unable to create TX control data DMA map, error = %d\n",
   7438 		    error);
   7439 		goto fail_2;
   7440 	}
   7441 
   7442 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7443 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7444 		aprint_error_dev(sc->sc_dev,
   7445 		    "unable to load TX control data DMA map, error = %d\n",
   7446 		    error);
   7447 		goto fail_3;
   7448 	}
   7449 
   7450 	return 0;
   7451 
   7452  fail_3:
   7453 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7454  fail_2:
   7455 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7456 	    WM_TXDESCS_SIZE(txq));
   7457  fail_1:
   7458 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7459  fail_0:
   7460 	return error;
   7461 }
   7462 
   7463 static void
   7464 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7465 {
   7466 
   7467 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7468 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7469 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7470 	    WM_TXDESCS_SIZE(txq));
   7471 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7472 }
   7473 
   7474 static int
   7475 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7476 {
   7477 	int error;
   7478 	size_t rxq_descs_size;
   7479 
   7480 	/*
   7481 	 * Allocate the control data structures, and create and load the
   7482 	 * DMA map for it.
   7483 	 *
   7484 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7485 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7486 	 * both sets within the same 4G segment.
   7487 	 */
   7488 	rxq->rxq_ndesc = WM_NRXDESC;
   7489 	if (sc->sc_type == WM_T_82574)
   7490 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7491 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7492 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7493 	else
   7494 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7495 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7496 
   7497 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7498 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7499 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7500 		aprint_error_dev(sc->sc_dev,
   7501 		    "unable to allocate RX control data, error = %d\n",
   7502 		    error);
   7503 		goto fail_0;
   7504 	}
   7505 
   7506 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7507 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7508 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7509 		aprint_error_dev(sc->sc_dev,
   7510 		    "unable to map RX control data, error = %d\n", error);
   7511 		goto fail_1;
   7512 	}
   7513 
   7514 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7515 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7516 		aprint_error_dev(sc->sc_dev,
   7517 		    "unable to create RX control data DMA map, error = %d\n",
   7518 		    error);
   7519 		goto fail_2;
   7520 	}
   7521 
   7522 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7523 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7524 		aprint_error_dev(sc->sc_dev,
   7525 		    "unable to load RX control data DMA map, error = %d\n",
   7526 		    error);
   7527 		goto fail_3;
   7528 	}
   7529 
   7530 	return 0;
   7531 
   7532  fail_3:
   7533 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7534  fail_2:
   7535 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7536 	    rxq_descs_size);
   7537  fail_1:
   7538 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7539  fail_0:
   7540 	return error;
   7541 }
   7542 
   7543 static void
   7544 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7545 {
   7546 
   7547 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7548 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7549 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7550 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7551 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7552 }
   7553 
   7554 
   7555 static int
   7556 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7557 {
   7558 	int i, error;
   7559 
   7560 	/* Create the transmit buffer DMA maps. */
   7561 	WM_TXQUEUELEN(txq) =
   7562 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7563 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7564 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7565 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7566 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7567 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7568 			aprint_error_dev(sc->sc_dev,
   7569 			    "unable to create Tx DMA map %d, error = %d\n",
   7570 			    i, error);
   7571 			goto fail;
   7572 		}
   7573 	}
   7574 
   7575 	return 0;
   7576 
   7577  fail:
   7578 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7579 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7580 			bus_dmamap_destroy(sc->sc_dmat,
   7581 			    txq->txq_soft[i].txs_dmamap);
   7582 	}
   7583 	return error;
   7584 }
   7585 
   7586 static void
   7587 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7588 {
   7589 	int i;
   7590 
   7591 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7592 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7593 			bus_dmamap_destroy(sc->sc_dmat,
   7594 			    txq->txq_soft[i].txs_dmamap);
   7595 	}
   7596 }
   7597 
   7598 static int
   7599 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7600 {
   7601 	int i, error;
   7602 
   7603 	/* Create the receive buffer DMA maps. */
   7604 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7605 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7606 			    MCLBYTES, 0, 0,
   7607 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7608 			aprint_error_dev(sc->sc_dev,
    7609 			    "unable to create Rx DMA map %d, error = %d\n",
   7610 			    i, error);
   7611 			goto fail;
   7612 		}
   7613 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7614 	}
   7615 
   7616 	return 0;
   7617 
   7618  fail:
   7619 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7620 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7621 			bus_dmamap_destroy(sc->sc_dmat,
   7622 			    rxq->rxq_soft[i].rxs_dmamap);
   7623 	}
   7624 	return error;
   7625 }
   7626 
   7627 static void
   7628 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7629 {
   7630 	int i;
   7631 
   7632 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7633 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7634 			bus_dmamap_destroy(sc->sc_dmat,
   7635 			    rxq->rxq_soft[i].rxs_dmamap);
   7636 	}
   7637 }
   7638 
   7639 /*
    7640  * wm_alloc_txrx_queues:
   7641  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7642  */
   7643 static int
   7644 wm_alloc_txrx_queues(struct wm_softc *sc)
   7645 {
   7646 	int i, error, tx_done, rx_done;
   7647 
   7648 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7649 	    KM_SLEEP);
   7650 	if (sc->sc_queue == NULL) {
    7651 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7652 		error = ENOMEM;
   7653 		goto fail_0;
   7654 	}
   7655 
   7656 	/* For transmission */
   7657 	error = 0;
   7658 	tx_done = 0;
   7659 	for (i = 0; i < sc->sc_nqueues; i++) {
   7660 #ifdef WM_EVENT_COUNTERS
   7661 		int j;
   7662 		const char *xname;
   7663 #endif
   7664 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7665 		txq->txq_sc = sc;
   7666 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7667 
   7668 		error = wm_alloc_tx_descs(sc, txq);
   7669 		if (error)
   7670 			break;
   7671 		error = wm_alloc_tx_buffer(sc, txq);
   7672 		if (error) {
   7673 			wm_free_tx_descs(sc, txq);
   7674 			break;
   7675 		}
   7676 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7677 		if (txq->txq_interq == NULL) {
   7678 			wm_free_tx_descs(sc, txq);
   7679 			wm_free_tx_buffer(sc, txq);
   7680 			error = ENOMEM;
   7681 			break;
   7682 		}
   7683 
   7684 #ifdef WM_EVENT_COUNTERS
   7685 		xname = device_xname(sc->sc_dev);
   7686 
   7687 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7688 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7689 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7690 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7691 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7692 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7693 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7694 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7695 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7696 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7697 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7698 
   7699 		for (j = 0; j < WM_NTXSEGS; j++) {
   7700 			snprintf(txq->txq_txseg_evcnt_names[j],
   7701 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7702 			    "txq%02dtxseg%d", i, j);
   7703 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7704 			    EVCNT_TYPE_MISC,
   7705 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7706 		}
   7707 
   7708 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7709 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7710 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7711 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7712 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7713 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7714 #endif /* WM_EVENT_COUNTERS */
   7715 
   7716 		tx_done++;
   7717 	}
   7718 	if (error)
   7719 		goto fail_1;
   7720 
   7721 	/* For receive */
   7722 	error = 0;
   7723 	rx_done = 0;
   7724 	for (i = 0; i < sc->sc_nqueues; i++) {
   7725 #ifdef WM_EVENT_COUNTERS
   7726 		const char *xname;
   7727 #endif
   7728 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7729 		rxq->rxq_sc = sc;
   7730 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7731 
   7732 		error = wm_alloc_rx_descs(sc, rxq);
   7733 		if (error)
   7734 			break;
   7735 
   7736 		error = wm_alloc_rx_buffer(sc, rxq);
   7737 		if (error) {
   7738 			wm_free_rx_descs(sc, rxq);
   7739 			break;
   7740 		}
   7741 
   7742 #ifdef WM_EVENT_COUNTERS
   7743 		xname = device_xname(sc->sc_dev);
   7744 
   7745 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7746 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7747 
   7748 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7749 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7750 #endif /* WM_EVENT_COUNTERS */
   7751 
   7752 		rx_done++;
   7753 	}
   7754 	if (error)
   7755 		goto fail_2;
   7756 
   7757 	return 0;
   7758 
   7759  fail_2:
   7760 	for (i = 0; i < rx_done; i++) {
   7761 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7762 		wm_free_rx_buffer(sc, rxq);
   7763 		wm_free_rx_descs(sc, rxq);
   7764 		if (rxq->rxq_lock)
   7765 			mutex_obj_free(rxq->rxq_lock);
   7766 	}
   7767  fail_1:
   7768 	for (i = 0; i < tx_done; i++) {
   7769 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7770 		pcq_destroy(txq->txq_interq);
   7771 		wm_free_tx_buffer(sc, txq);
   7772 		wm_free_tx_descs(sc, txq);
   7773 		if (txq->txq_lock)
   7774 			mutex_obj_free(txq->txq_lock);
   7775 	}
   7776 
   7777 	kmem_free(sc->sc_queue,
   7778 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7779  fail_0:
   7780 	return error;
   7781 }
   7782 
   7783 /*
    7784  * wm_free_txrx_queues:
   7785  *	Free {tx,rx}descs and {tx,rx} buffers
   7786  */
   7787 static void
   7788 wm_free_txrx_queues(struct wm_softc *sc)
   7789 {
   7790 	int i;
   7791 
   7792 	for (i = 0; i < sc->sc_nqueues; i++) {
   7793 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7794 
   7795 #ifdef WM_EVENT_COUNTERS
   7796 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7797 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7798 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7799 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7800 #endif /* WM_EVENT_COUNTERS */
   7801 
   7802 		wm_free_rx_buffer(sc, rxq);
   7803 		wm_free_rx_descs(sc, rxq);
   7804 		if (rxq->rxq_lock)
   7805 			mutex_obj_free(rxq->rxq_lock);
   7806 	}
   7807 
   7808 	for (i = 0; i < sc->sc_nqueues; i++) {
   7809 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7810 		struct mbuf *m;
   7811 #ifdef WM_EVENT_COUNTERS
   7812 		int j;
   7813 
   7814 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7815 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7816 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7817 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7818 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7819 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7820 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7821 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7822 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7823 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7824 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7825 
   7826 		for (j = 0; j < WM_NTXSEGS; j++)
   7827 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7828 
   7829 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7830 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7831 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7832 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7833 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7834 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7835 #endif /* WM_EVENT_COUNTERS */
   7836 
   7837 		/* Drain txq_interq */
   7838 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7839 			m_freem(m);
   7840 		pcq_destroy(txq->txq_interq);
   7841 
   7842 		wm_free_tx_buffer(sc, txq);
   7843 		wm_free_tx_descs(sc, txq);
   7844 		if (txq->txq_lock)
   7845 			mutex_obj_free(txq->txq_lock);
   7846 	}
   7847 
   7848 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7849 }
   7850 
   7851 static void
   7852 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7853 {
   7854 
   7855 	KASSERT(mutex_owned(txq->txq_lock));
   7856 
   7857 	/* Initialize the transmit descriptor ring. */
   7858 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7859 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7860 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7861 	txq->txq_free = WM_NTXDESC(txq);
   7862 	txq->txq_next = 0;
   7863 }
   7864 
   7865 static void
   7866 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7867     struct wm_txqueue *txq)
   7868 {
   7869 
   7870 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7871 		device_xname(sc->sc_dev), __func__));
   7872 	KASSERT(mutex_owned(txq->txq_lock));
   7873 
   7874 	if (sc->sc_type < WM_T_82543) {
   7875 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7876 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7877 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7878 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7879 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7880 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7881 	} else {
   7882 		int qid = wmq->wmq_id;
   7883 
   7884 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7885 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7886 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7887 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7888 
   7889 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7890 			/*
   7891 			 * Don't write TDT before TCTL.EN is set.
    7892 			 * See the documentation.
   7893 			 */
   7894 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7895 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7896 			    | TXDCTL_WTHRESH(0));
   7897 		else {
   7898 			/* XXX should update with AIM? */
   7899 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7900 			if (sc->sc_type >= WM_T_82540) {
   7901 				/* Should be the same */
   7902 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7903 			}
   7904 
   7905 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7906 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7907 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7908 		}
   7909 	}
   7910 }
   7911 
   7912 static void
   7913 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7914 {
   7915 	int i;
   7916 
   7917 	KASSERT(mutex_owned(txq->txq_lock));
   7918 
   7919 	/* Initialize the transmit job descriptors. */
   7920 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7921 		txq->txq_soft[i].txs_mbuf = NULL;
   7922 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7923 	txq->txq_snext = 0;
   7924 	txq->txq_sdirty = 0;
   7925 }
   7926 
   7927 static void
   7928 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7929     struct wm_txqueue *txq)
   7930 {
   7931 
   7932 	KASSERT(mutex_owned(txq->txq_lock));
   7933 
   7934 	/*
   7935 	 * Set up some register offsets that are different between
   7936 	 * the i82542 and the i82543 and later chips.
   7937 	 */
   7938 	if (sc->sc_type < WM_T_82543)
   7939 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7940 	else
   7941 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7942 
   7943 	wm_init_tx_descs(sc, txq);
   7944 	wm_init_tx_regs(sc, wmq, txq);
   7945 	wm_init_tx_buffer(sc, txq);
   7946 
    7947 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD */
   7948 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7949 
   7950 	txq->txq_sending = false;
   7951 }
   7952 
   7953 static void
   7954 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7955     struct wm_rxqueue *rxq)
   7956 {
   7957 
   7958 	KASSERT(mutex_owned(rxq->rxq_lock));
   7959 
   7960 	/*
   7961 	 * Initialize the receive descriptor and receive job
   7962 	 * descriptor rings.
   7963 	 */
   7964 	if (sc->sc_type < WM_T_82543) {
   7965 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7966 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7967 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7968 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7969 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7970 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7971 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7972 
   7973 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7974 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7975 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7976 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7977 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7978 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7979 	} else {
   7980 		int qid = wmq->wmq_id;
   7981 
   7982 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7983 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7984 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7985 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7986 
   7987 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7988 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7989 				panic("%s: MCLBYTES %d unsupported for 82575 "
   7990 				    "or higher\n", __func__, MCLBYTES);
   7991 
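         			/*
         			 * The BSIZEPKT field of SRRCTL is expressed in
         			 * units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
         			 * hence the divisibility check above and the
         			 * right shift in the register write below.
         			 */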
   7992 			/*
   7993 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   7994 			 * only.
   7995 			 */
   7996 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7997 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7998 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7999 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   8000 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   8001 			    | RXDCTL_WTHRESH(1));
   8002 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8003 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8004 		} else {
   8005 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8006 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8007 			/* XXX should update with AIM? */
   8008 			CSR_WRITE(sc, WMREG_RDTR,
   8009 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   8010 			/* MUST be same */
   8011 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   8012 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   8013 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   8014 		}
   8015 	}
   8016 }
   8017 
   8018 static int
   8019 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8020 {
   8021 	struct wm_rxsoft *rxs;
   8022 	int error, i;
   8023 
   8024 	KASSERT(mutex_owned(rxq->rxq_lock));
   8025 
   8026 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8027 		rxs = &rxq->rxq_soft[i];
   8028 		if (rxs->rxs_mbuf == NULL) {
   8029 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8030 				log(LOG_ERR, "%s: unable to allocate or map "
   8031 				    "rx buffer %d, error = %d\n",
   8032 				    device_xname(sc->sc_dev), i, error);
   8033 				/*
   8034 				 * XXX Should attempt to run with fewer receive
   8035 				 * XXX buffers instead of just failing.
   8036 				 */
   8037 				wm_rxdrain(rxq);
   8038 				return ENOMEM;
   8039 			}
   8040 		} else {
   8041 			/*
   8042 			 * For 82575 and 82576, the RX descriptors must be
   8043 			 * initialized after the setting of RCTL.EN in
   8044 			 * wm_set_filter()
   8045 			 */
   8046 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8047 				wm_init_rxdesc(rxq, i);
   8048 		}
   8049 	}
   8050 	rxq->rxq_ptr = 0;
   8051 	rxq->rxq_discard = 0;
   8052 	WM_RXCHAIN_RESET(rxq);
   8053 
   8054 	return 0;
   8055 }
   8056 
   8057 static int
   8058 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8059     struct wm_rxqueue *rxq)
   8060 {
   8061 
   8062 	KASSERT(mutex_owned(rxq->rxq_lock));
   8063 
   8064 	/*
   8065 	 * Set up some register offsets that are different between
   8066 	 * the i82542 and the i82543 and later chips.
   8067 	 */
   8068 	if (sc->sc_type < WM_T_82543)
   8069 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8070 	else
   8071 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8072 
   8073 	wm_init_rx_regs(sc, wmq, rxq);
   8074 	return wm_init_rx_buffer(sc, rxq);
   8075 }
   8076 
   8077 /*
    8078  * wm_init_txrx_queues:
   8079  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8080  */
   8081 static int
   8082 wm_init_txrx_queues(struct wm_softc *sc)
   8083 {
   8084 	int i, error = 0;
   8085 
   8086 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8087 		device_xname(sc->sc_dev), __func__));
   8088 
   8089 	for (i = 0; i < sc->sc_nqueues; i++) {
   8090 		struct wm_queue *wmq = &sc->sc_queue[i];
   8091 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8092 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8093 
   8094 		/*
   8095 		 * TODO
    8096 		 * Currently, a constant value is used instead of AIM.
    8097 		 * Furthermore, the interrupt interval of a multiqueue
    8098 		 * configuration using polling mode is less than the default.
   8099 		 * More tuning and AIM are required.
   8100 		 */
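         		/*
         		 * wmq_itr feeds the interrupt moderation register writes
         		 * (e.g. TIDV/TADV and RDTR/RADV in wm_init_tx_regs() and
         		 * wm_init_rx_regs()); 50 here is a fixed value used until
         		 * AIM is implemented, per the TODO above.
         		 */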
   8101 		if (wm_is_using_multiqueue(sc))
   8102 			wmq->wmq_itr = 50;
   8103 		else
   8104 			wmq->wmq_itr = sc->sc_itr_init;
   8105 		wmq->wmq_set_itr = true;
   8106 
   8107 		mutex_enter(txq->txq_lock);
   8108 		wm_init_tx_queue(sc, wmq, txq);
   8109 		mutex_exit(txq->txq_lock);
   8110 
   8111 		mutex_enter(rxq->rxq_lock);
   8112 		error = wm_init_rx_queue(sc, wmq, rxq);
   8113 		mutex_exit(rxq->rxq_lock);
   8114 		if (error)
   8115 			break;
   8116 	}
   8117 
   8118 	return error;
   8119 }
   8120 
   8121 /*
   8122  * wm_tx_offload:
   8123  *
   8124  *	Set up TCP/IP checksumming parameters for the
   8125  *	specified packet.
   8126  */
   8127 static void
   8128 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8129     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8130 {
   8131 	struct mbuf *m0 = txs->txs_mbuf;
   8132 	struct livengood_tcpip_ctxdesc *t;
   8133 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8134 	uint32_t ipcse;
   8135 	struct ether_header *eh;
   8136 	int offset, iphl;
   8137 	uint8_t fields;
   8138 
   8139 	/*
   8140 	 * XXX It would be nice if the mbuf pkthdr had offset
   8141 	 * fields for the protocol headers.
   8142 	 */
   8143 
   8144 	eh = mtod(m0, struct ether_header *);
   8145 	switch (htons(eh->ether_type)) {
   8146 	case ETHERTYPE_IP:
   8147 	case ETHERTYPE_IPV6:
   8148 		offset = ETHER_HDR_LEN;
   8149 		break;
   8150 
   8151 	case ETHERTYPE_VLAN:
   8152 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8153 		break;
   8154 
   8155 	default:
   8156 		/* Don't support this protocol or encapsulation. */
   8157 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8158 		txq->txq_last_hw_ipcs = 0;
   8159 		txq->txq_last_hw_tucs = 0;
   8160 		*fieldsp = 0;
   8161 		*cmdp = 0;
   8162 		return;
   8163 	}
   8164 
   8165 	if ((m0->m_pkthdr.csum_flags &
   8166 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8167 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8168 	} else
   8169 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8170 
   8171 	ipcse = offset + iphl - 1;
   8172 
   8173 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8174 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8175 	seg = 0;
   8176 	fields = 0;
   8177 
   8178 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8179 		int hlen = offset + iphl;
   8180 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8181 
   8182 		if (__predict_false(m0->m_len <
   8183 				    (hlen + sizeof(struct tcphdr)))) {
   8184 			/*
   8185 			 * TCP/IP headers are not in the first mbuf; we need
   8186 			 * to do this the slow and painful way. Let's just
   8187 			 * hope this doesn't happen very often.
   8188 			 */
   8189 			struct tcphdr th;
   8190 
   8191 			WM_Q_EVCNT_INCR(txq, tsopain);
   8192 
   8193 			m_copydata(m0, hlen, sizeof(th), &th);
   8194 			if (v4) {
   8195 				struct ip ip;
   8196 
   8197 				m_copydata(m0, offset, sizeof(ip), &ip);
   8198 				ip.ip_len = 0;
   8199 				m_copyback(m0,
   8200 				    offset + offsetof(struct ip, ip_len),
   8201 				    sizeof(ip.ip_len), &ip.ip_len);
   8202 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8203 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8204 			} else {
   8205 				struct ip6_hdr ip6;
   8206 
   8207 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8208 				ip6.ip6_plen = 0;
   8209 				m_copyback(m0,
   8210 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8211 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8212 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8213 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8214 			}
   8215 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8216 			    sizeof(th.th_sum), &th.th_sum);
   8217 
   8218 			hlen += th.th_off << 2;
   8219 		} else {
   8220 			/*
   8221 			 * TCP/IP headers are in the first mbuf; we can do
   8222 			 * this the easy way.
   8223 			 */
   8224 			struct tcphdr *th;
   8225 
   8226 			if (v4) {
   8227 				struct ip *ip =
   8228 				    (void *)(mtod(m0, char *) + offset);
   8229 				th = (void *)(mtod(m0, char *) + hlen);
   8230 
   8231 				ip->ip_len = 0;
   8232 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8233 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8234 			} else {
   8235 				struct ip6_hdr *ip6 =
   8236 				    (void *)(mtod(m0, char *) + offset);
   8237 				th = (void *)(mtod(m0, char *) + hlen);
   8238 
   8239 				ip6->ip6_plen = 0;
   8240 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8241 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8242 			}
   8243 			hlen += th->th_off << 2;
   8244 		}
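         		/*
         		 * At this point the IP total length field has been zeroed
         		 * and th_sum seeded with a pseudo-header checksum that
         		 * excludes the length, so the hardware can fill in the
         		 * per-segment lengths and final checksums while it
         		 * segments the packet.
         		 */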
   8245 
   8246 		if (v4) {
   8247 			WM_Q_EVCNT_INCR(txq, tso);
   8248 			cmdlen |= WTX_TCPIP_CMD_IP;
   8249 		} else {
   8250 			WM_Q_EVCNT_INCR(txq, tso6);
   8251 			ipcse = 0;
   8252 		}
   8253 		cmd |= WTX_TCPIP_CMD_TSE;
   8254 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8255 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8256 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8257 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8258 	}
   8259 
   8260 	/*
   8261 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8262 	 * offload feature, if we load the context descriptor, we
   8263 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8264 	 */
   8265 
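         	/*
         	 * Worked example (untagged IPv4, 20-byte IP header): offset = 14,
         	 * so IPCSS = 14, IPCSO = 14 + offsetof(struct ip, ip_sum) = 24 and
         	 * IPCSE = 14 + 20 - 1 = 33, the last byte of the IP header.
         	 */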
   8266 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8267 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8268 	    WTX_TCPIP_IPCSE(ipcse);
   8269 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8270 		WM_Q_EVCNT_INCR(txq, ipsum);
   8271 		fields |= WTX_IXSM;
   8272 	}
   8273 
   8274 	offset += iphl;
   8275 
   8276 	if (m0->m_pkthdr.csum_flags &
   8277 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8278 		WM_Q_EVCNT_INCR(txq, tusum);
   8279 		fields |= WTX_TXSM;
   8280 		tucs = WTX_TCPIP_TUCSS(offset) |
   8281 		    WTX_TCPIP_TUCSO(offset +
   8282 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8283 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8284 	} else if ((m0->m_pkthdr.csum_flags &
   8285 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8286 		WM_Q_EVCNT_INCR(txq, tusum6);
   8287 		fields |= WTX_TXSM;
   8288 		tucs = WTX_TCPIP_TUCSS(offset) |
   8289 		    WTX_TCPIP_TUCSO(offset +
   8290 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8291 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8292 	} else {
   8293 		/* Just initialize it to a valid TCP context. */
   8294 		tucs = WTX_TCPIP_TUCSS(offset) |
   8295 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8296 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8297 	}
   8298 
   8299 	*cmdp = cmd;
   8300 	*fieldsp = fields;
   8301 
   8302 	/*
    8303 	 * We don't have to write a context descriptor for every packet,
    8304 	 * except on the 82574. For the 82574, we must write a context
    8305 	 * descriptor for every packet when we use two descriptor queues.
   8306 	 *
   8307 	 * The 82574L can only remember the *last* context used
    8308 	 * regardless of the queue that it was used for.  We cannot reuse
   8309 	 * contexts on this hardware platform and must generate a new
   8310 	 * context every time.  82574L hardware spec, section 7.2.6,
   8311 	 * second note.
   8312 	 */
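         	/*
         	 * The sc_nqueues < 2 check below implements this: the cached
         	 * context is only reused on single-queue configurations, so a
         	 * two-queue 82574 always gets a fresh context descriptor.
         	 */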
   8313 	if (sc->sc_nqueues < 2) {
   8314 		/*
    8315 		 * Setting up a new checksum offload context for every
    8316 		 * frame takes a lot of processing time for the hardware.
    8317 		 * This also reduces performance a lot for small sized
    8318 		 * frames, so avoid it if the driver can reuse a previously
    8319 		 * configured checksum offload context.
    8320 		 * For TSO, in theory we could reuse the same TSO context
    8321 		 * only if the frame is the same type (IP/TCP) and has the
    8322 		 * same MSS.  However, checking whether a frame has the same
    8323 		 * IP/TCP structure is hard, so just ignore that and always
    8324 		 * establish a new TSO context.
   8325 		 */
   8326 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8327 		    == 0) {
   8328 			if (txq->txq_last_hw_cmd == cmd &&
   8329 			    txq->txq_last_hw_fields == fields &&
   8330 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8331 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8332 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8333 				return;
   8334 			}
   8335 		}
   8336 
   8337 		txq->txq_last_hw_cmd = cmd;
   8338 		txq->txq_last_hw_fields = fields;
   8339 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8340 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8341 	}
   8342 
   8343 	/* Fill in the context descriptor. */
   8344 	t = (struct livengood_tcpip_ctxdesc *)
   8345 	    &txq->txq_descs[txq->txq_next];
   8346 	t->tcpip_ipcs = htole32(ipcs);
   8347 	t->tcpip_tucs = htole32(tucs);
   8348 	t->tcpip_cmdlen = htole32(cmdlen);
   8349 	t->tcpip_seg = htole32(seg);
   8350 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8351 
   8352 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8353 	txs->txs_ndesc++;
   8354 }
   8355 
   8356 static inline int
   8357 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8358 {
   8359 	struct wm_softc *sc = ifp->if_softc;
   8360 	u_int cpuid = cpu_index(curcpu());
   8361 
   8362 	/*
    8363 	 * Currently, a simple distribution strategy based on the CPU index.
    8364 	 * TODO:
    8365 	 * distribute by flowid (RSS hash value).
   8366 	 */
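         	/*
         	 * For example, with ncpu = 8, sc_nqueues = 4 and
         	 * sc_affinity_offset = 0, a packet sent from CPU 5 is mapped to
         	 * Tx queue ((5 + 8 - 0) % 8) % 4 = 1.
         	 */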
   8367 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8368 }
   8369 
   8370 static inline bool
   8371 wm_linkdown_discard(struct wm_txqueue *txq)
   8372 {
   8373 
   8374 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8375 		return true;
   8376 
   8377 	return false;
   8378 }
   8379 
   8380 /*
   8381  * wm_start:		[ifnet interface function]
   8382  *
   8383  *	Start packet transmission on the interface.
   8384  */
   8385 static void
   8386 wm_start(struct ifnet *ifp)
   8387 {
   8388 	struct wm_softc *sc = ifp->if_softc;
   8389 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8390 
   8391 #ifdef WM_MPSAFE
   8392 	KASSERT(if_is_mpsafe(ifp));
   8393 #endif
   8394 	/*
   8395 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8396 	 */
   8397 
   8398 	mutex_enter(txq->txq_lock);
   8399 	if (!txq->txq_stopping)
   8400 		wm_start_locked(ifp);
   8401 	mutex_exit(txq->txq_lock);
   8402 }
   8403 
   8404 static void
   8405 wm_start_locked(struct ifnet *ifp)
   8406 {
   8407 	struct wm_softc *sc = ifp->if_softc;
   8408 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8409 
   8410 	wm_send_common_locked(ifp, txq, false);
   8411 }
   8412 
   8413 static int
   8414 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8415 {
   8416 	int qid;
   8417 	struct wm_softc *sc = ifp->if_softc;
   8418 	struct wm_txqueue *txq;
   8419 
   8420 	qid = wm_select_txqueue(ifp, m);
   8421 	txq = &sc->sc_queue[qid].wmq_txq;
   8422 
   8423 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8424 		m_freem(m);
   8425 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8426 		return ENOBUFS;
   8427 	}
   8428 
   8429 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8430 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8431 	if (m->m_flags & M_MCAST)
   8432 		if_statinc_ref(nsr, if_omcasts);
   8433 	IF_STAT_PUTREF(ifp);
   8434 
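         	/*
         	 * A failed mutex_tryenter() is harmless here: the packet already
         	 * sits on txq_interq and will be dequeued by the deferred start
         	 * path.  See the detailed comment in wm_nq_transmit().
         	 */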
   8435 	if (mutex_tryenter(txq->txq_lock)) {
   8436 		if (!txq->txq_stopping)
   8437 			wm_transmit_locked(ifp, txq);
   8438 		mutex_exit(txq->txq_lock);
   8439 	}
   8440 
   8441 	return 0;
   8442 }
   8443 
   8444 static void
   8445 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8446 {
   8447 
   8448 	wm_send_common_locked(ifp, txq, true);
   8449 }
   8450 
   8451 static void
   8452 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8453     bool is_transmit)
   8454 {
   8455 	struct wm_softc *sc = ifp->if_softc;
   8456 	struct mbuf *m0;
   8457 	struct wm_txsoft *txs;
   8458 	bus_dmamap_t dmamap;
   8459 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8460 	bus_addr_t curaddr;
   8461 	bus_size_t seglen, curlen;
   8462 	uint32_t cksumcmd;
   8463 	uint8_t cksumfields;
   8464 	bool remap = true;
   8465 
   8466 	KASSERT(mutex_owned(txq->txq_lock));
   8467 	KASSERT(!txq->txq_stopping);
   8468 
   8469 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8470 		return;
   8471 
   8472 	if (__predict_false(wm_linkdown_discard(txq))) {
   8473 		do {
   8474 			if (is_transmit)
   8475 				m0 = pcq_get(txq->txq_interq);
   8476 			else
   8477 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8478 			/*
    8479 			 * Count the packet as successfully sent even though
    8480 			 * it is discarded because the PHY link is down.
   8481 			 */
   8482 			if (m0 != NULL) {
   8483 				if_statinc(ifp, if_opackets);
   8484 				m_freem(m0);
   8485 			}
   8486 		} while (m0 != NULL);
   8487 		return;
   8488 	}
   8489 
   8490 	/* Remember the previous number of free descriptors. */
   8491 	ofree = txq->txq_free;
   8492 
   8493 	/*
   8494 	 * Loop through the send queue, setting up transmit descriptors
   8495 	 * until we drain the queue, or use up all available transmit
   8496 	 * descriptors.
   8497 	 */
   8498 	for (;;) {
   8499 		m0 = NULL;
   8500 
   8501 		/* Get a work queue entry. */
   8502 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8503 			wm_txeof(txq, UINT_MAX);
   8504 			if (txq->txq_sfree == 0) {
   8505 				DPRINTF(sc, WM_DEBUG_TX,
   8506 				    ("%s: TX: no free job descriptors\n",
   8507 					device_xname(sc->sc_dev)));
   8508 				WM_Q_EVCNT_INCR(txq, txsstall);
   8509 				break;
   8510 			}
   8511 		}
   8512 
   8513 		/* Grab a packet off the queue. */
   8514 		if (is_transmit)
   8515 			m0 = pcq_get(txq->txq_interq);
   8516 		else
   8517 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8518 		if (m0 == NULL)
   8519 			break;
   8520 
   8521 		DPRINTF(sc, WM_DEBUG_TX,
   8522 		    ("%s: TX: have packet to transmit: %p\n",
   8523 			device_xname(sc->sc_dev), m0));
   8524 
   8525 		txs = &txq->txq_soft[txq->txq_snext];
   8526 		dmamap = txs->txs_dmamap;
   8527 
   8528 		use_tso = (m0->m_pkthdr.csum_flags &
   8529 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8530 
   8531 		/*
   8532 		 * So says the Linux driver:
   8533 		 * The controller does a simple calculation to make sure
   8534 		 * there is enough room in the FIFO before initiating the
   8535 		 * DMA for each buffer. The calc is:
   8536 		 *	4 = ceil(buffer len / MSS)
   8537 		 * To make sure we don't overrun the FIFO, adjust the max
   8538 		 * buffer len if the MSS drops.
   8539 		 */
   8540 		dmamap->dm_maxsegsz =
   8541 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8542 		    ? m0->m_pkthdr.segsz << 2
   8543 		    : WTX_MAX_LEN;
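         		/*
         		 * For example, with a 1460-byte MSS this caps each DMA
         		 * segment at 4 * 1460 = 5840 bytes (assuming that is
         		 * smaller than WTX_MAX_LEN).
         		 */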
   8544 
   8545 		/*
   8546 		 * Load the DMA map.  If this fails, the packet either
   8547 		 * didn't fit in the allotted number of segments, or we
   8548 		 * were short on resources.  For the too-many-segments
   8549 		 * case, we simply report an error and drop the packet,
   8550 		 * since we can't sanely copy a jumbo packet to a single
   8551 		 * buffer.
   8552 		 */
   8553 retry:
   8554 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8555 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8556 		if (__predict_false(error)) {
   8557 			if (error == EFBIG) {
   8558 				if (remap == true) {
   8559 					struct mbuf *m;
   8560 
   8561 					remap = false;
   8562 					m = m_defrag(m0, M_NOWAIT);
   8563 					if (m != NULL) {
   8564 						WM_Q_EVCNT_INCR(txq, defrag);
   8565 						m0 = m;
   8566 						goto retry;
   8567 					}
   8568 				}
   8569 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8570 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8571 				    "DMA segments, dropping...\n",
   8572 				    device_xname(sc->sc_dev));
   8573 				wm_dump_mbuf_chain(sc, m0);
   8574 				m_freem(m0);
   8575 				continue;
   8576 			}
   8577 			/* Short on resources, just stop for now. */
   8578 			DPRINTF(sc, WM_DEBUG_TX,
   8579 			    ("%s: TX: dmamap load failed: %d\n",
   8580 				device_xname(sc->sc_dev), error));
   8581 			break;
   8582 		}
   8583 
   8584 		segs_needed = dmamap->dm_nsegs;
   8585 		if (use_tso) {
   8586 			/* For sentinel descriptor; see below. */
   8587 			segs_needed++;
   8588 		}
   8589 
   8590 		/*
   8591 		 * Ensure we have enough descriptors free to describe
   8592 		 * the packet. Note, we always reserve one descriptor
   8593 		 * at the end of the ring due to the semantics of the
   8594 		 * TDT register, plus one more in the event we need
   8595 		 * to load offload context.
   8596 		 */
   8597 		if (segs_needed > txq->txq_free - 2) {
   8598 			/*
   8599 			 * Not enough free descriptors to transmit this
   8600 			 * packet.  We haven't committed anything yet,
   8601 			 * so just unload the DMA map, put the packet
    8602 			 * back on the queue, and punt. Notify the upper
   8603 			 * layer that there are no more slots left.
   8604 			 */
   8605 			DPRINTF(sc, WM_DEBUG_TX,
   8606 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8607 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8608 				segs_needed, txq->txq_free - 1));
   8609 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8610 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8611 			WM_Q_EVCNT_INCR(txq, txdstall);
   8612 			break;
   8613 		}
   8614 
   8615 		/*
   8616 		 * Check for 82547 Tx FIFO bug. We need to do this
   8617 		 * once we know we can transmit the packet, since we
   8618 		 * do some internal FIFO space accounting here.
   8619 		 */
   8620 		if (sc->sc_type == WM_T_82547 &&
   8621 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8622 			DPRINTF(sc, WM_DEBUG_TX,
   8623 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8624 				device_xname(sc->sc_dev)));
   8625 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8626 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8627 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8628 			break;
   8629 		}
   8630 
   8631 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8632 
   8633 		DPRINTF(sc, WM_DEBUG_TX,
   8634 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8635 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8636 
   8637 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8638 
   8639 		/*
   8640 		 * Store a pointer to the packet so that we can free it
   8641 		 * later.
   8642 		 *
   8643 		 * Initially, we consider the number of descriptors the
   8644 		 * packet uses the number of DMA segments.  This may be
   8645 		 * incremented by 1 if we do checksum offload (a descriptor
   8646 		 * is used to set the checksum context).
   8647 		 */
   8648 		txs->txs_mbuf = m0;
   8649 		txs->txs_firstdesc = txq->txq_next;
   8650 		txs->txs_ndesc = segs_needed;
   8651 
   8652 		/* Set up offload parameters for this packet. */
   8653 		if (m0->m_pkthdr.csum_flags &
   8654 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8655 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8656 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8657 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8658 		} else {
   8659 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8660 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8661 			cksumcmd = 0;
   8662 			cksumfields = 0;
   8663 		}
   8664 
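         		/*
         		 * Every data descriptor gets IFCS (insert Ethernet FCS) and
         		 * IDE (interrupt delay enable) in addition to any checksum
         		 * command bits set up above.
         		 */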
   8665 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8666 
   8667 		/* Sync the DMA map. */
   8668 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8669 		    BUS_DMASYNC_PREWRITE);
   8670 
   8671 		/* Initialize the transmit descriptor. */
   8672 		for (nexttx = txq->txq_next, seg = 0;
   8673 		     seg < dmamap->dm_nsegs; seg++) {
   8674 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8675 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8676 			     seglen != 0;
   8677 			     curaddr += curlen, seglen -= curlen,
   8678 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8679 				curlen = seglen;
   8680 
   8681 				/*
   8682 				 * So says the Linux driver:
    8683 				 * Workaround for premature descriptor
   8684 				 * write-backs in TSO mode.  Append a
   8685 				 * 4-byte sentinel descriptor.
   8686 				 */
   8687 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8688 				    curlen > 8)
   8689 					curlen -= 4;
   8690 
   8691 				wm_set_dma_addr(
   8692 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8693 				txq->txq_descs[nexttx].wtx_cmdlen
   8694 				    = htole32(cksumcmd | curlen);
   8695 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8696 				    = 0;
   8697 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8698 				    = cksumfields;
   8699 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8700 				lasttx = nexttx;
   8701 
   8702 				DPRINTF(sc, WM_DEBUG_TX,
   8703 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8704 					"len %#04zx\n",
   8705 					device_xname(sc->sc_dev), nexttx,
   8706 					(uint64_t)curaddr, curlen));
   8707 			}
   8708 		}
   8709 
   8710 		KASSERT(lasttx != -1);
   8711 
   8712 		/*
   8713 		 * Set up the command byte on the last descriptor of
   8714 		 * the packet. If we're in the interrupt delay window,
   8715 		 * delay the interrupt.
   8716 		 */
   8717 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8718 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8719 
   8720 		/*
   8721 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8722 		 * up the descriptor to encapsulate the packet for us.
   8723 		 *
   8724 		 * This is only valid on the last descriptor of the packet.
   8725 		 */
   8726 		if (vlan_has_tag(m0)) {
   8727 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8728 			    htole32(WTX_CMD_VLE);
   8729 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8730 			    = htole16(vlan_get_tag(m0));
   8731 		}
   8732 
   8733 		txs->txs_lastdesc = lasttx;
   8734 
   8735 		DPRINTF(sc, WM_DEBUG_TX,
   8736 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8737 			device_xname(sc->sc_dev),
   8738 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8739 
   8740 		/* Sync the descriptors we're using. */
   8741 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8742 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8743 
   8744 		/* Give the packet to the chip. */
   8745 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8746 
   8747 		DPRINTF(sc, WM_DEBUG_TX,
   8748 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8749 
   8750 		DPRINTF(sc, WM_DEBUG_TX,
   8751 		    ("%s: TX: finished transmitting packet, job %d\n",
   8752 			device_xname(sc->sc_dev), txq->txq_snext));
   8753 
   8754 		/* Advance the tx pointer. */
   8755 		txq->txq_free -= txs->txs_ndesc;
   8756 		txq->txq_next = nexttx;
   8757 
   8758 		txq->txq_sfree--;
   8759 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8760 
   8761 		/* Pass the packet to any BPF listeners. */
   8762 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8763 	}
   8764 
   8765 	if (m0 != NULL) {
   8766 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8767 		WM_Q_EVCNT_INCR(txq, descdrop);
   8768 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8769 			__func__));
   8770 		m_freem(m0);
   8771 	}
   8772 
   8773 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8774 		/* No more slots; notify upper layer. */
   8775 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8776 	}
   8777 
   8778 	if (txq->txq_free != ofree) {
   8779 		/* Set a watchdog timer in case the chip flakes out. */
   8780 		txq->txq_lastsent = time_uptime;
   8781 		txq->txq_sending = true;
   8782 	}
   8783 }
   8784 
   8785 /*
   8786  * wm_nq_tx_offload:
   8787  *
   8788  *	Set up TCP/IP checksumming parameters for the
   8789  *	specified packet, for NEWQUEUE devices
   8790  */
   8791 static void
   8792 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8793     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8794 {
   8795 	struct mbuf *m0 = txs->txs_mbuf;
   8796 	uint32_t vl_len, mssidx, cmdc;
   8797 	struct ether_header *eh;
   8798 	int offset, iphl;
   8799 
   8800 	/*
   8801 	 * XXX It would be nice if the mbuf pkthdr had offset
   8802 	 * fields for the protocol headers.
   8803 	 */
   8804 	*cmdlenp = 0;
   8805 	*fieldsp = 0;
   8806 
   8807 	eh = mtod(m0, struct ether_header *);
   8808 	switch (htons(eh->ether_type)) {
   8809 	case ETHERTYPE_IP:
   8810 	case ETHERTYPE_IPV6:
   8811 		offset = ETHER_HDR_LEN;
   8812 		break;
   8813 
   8814 	case ETHERTYPE_VLAN:
   8815 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8816 		break;
   8817 
   8818 	default:
   8819 		/* Don't support this protocol or encapsulation. */
   8820 		*do_csum = false;
   8821 		return;
   8822 	}
   8823 	*do_csum = true;
   8824 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8825 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8826 
   8827 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8828 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8829 
   8830 	if ((m0->m_pkthdr.csum_flags &
   8831 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8832 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8833 	} else {
   8834 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8835 	}
   8836 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8837 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8838 
   8839 	if (vlan_has_tag(m0)) {
   8840 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8841 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8842 		*cmdlenp |= NQTX_CMD_VLE;
   8843 	}
   8844 
   8845 	mssidx = 0;
   8846 
   8847 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8848 		int hlen = offset + iphl;
   8849 		int tcp_hlen;
   8850 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8851 
   8852 		if (__predict_false(m0->m_len <
   8853 				    (hlen + sizeof(struct tcphdr)))) {
   8854 			/*
   8855 			 * TCP/IP headers are not in the first mbuf; we need
   8856 			 * to do this the slow and painful way. Let's just
   8857 			 * hope this doesn't happen very often.
   8858 			 */
   8859 			struct tcphdr th;
   8860 
   8861 			WM_Q_EVCNT_INCR(txq, tsopain);
   8862 
   8863 			m_copydata(m0, hlen, sizeof(th), &th);
   8864 			if (v4) {
   8865 				struct ip ip;
   8866 
   8867 				m_copydata(m0, offset, sizeof(ip), &ip);
   8868 				ip.ip_len = 0;
   8869 				m_copyback(m0,
   8870 				    offset + offsetof(struct ip, ip_len),
   8871 				    sizeof(ip.ip_len), &ip.ip_len);
   8872 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8873 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8874 			} else {
   8875 				struct ip6_hdr ip6;
   8876 
   8877 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8878 				ip6.ip6_plen = 0;
   8879 				m_copyback(m0,
   8880 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8881 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8882 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8883 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8884 			}
   8885 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8886 			    sizeof(th.th_sum), &th.th_sum);
   8887 
   8888 			tcp_hlen = th.th_off << 2;
   8889 		} else {
   8890 			/*
   8891 			 * TCP/IP headers are in the first mbuf; we can do
   8892 			 * this the easy way.
   8893 			 */
   8894 			struct tcphdr *th;
   8895 
   8896 			if (v4) {
   8897 				struct ip *ip =
   8898 				    (void *)(mtod(m0, char *) + offset);
   8899 				th = (void *)(mtod(m0, char *) + hlen);
   8900 
   8901 				ip->ip_len = 0;
   8902 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8903 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8904 			} else {
   8905 				struct ip6_hdr *ip6 =
   8906 				    (void *)(mtod(m0, char *) + offset);
   8907 				th = (void *)(mtod(m0, char *) + hlen);
   8908 
   8909 				ip6->ip6_plen = 0;
   8910 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8911 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8912 			}
   8913 			tcp_hlen = th->th_off << 2;
   8914 		}
   8915 		hlen += tcp_hlen;
   8916 		*cmdlenp |= NQTX_CMD_TSE;
   8917 
   8918 		if (v4) {
   8919 			WM_Q_EVCNT_INCR(txq, tso);
   8920 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8921 		} else {
   8922 			WM_Q_EVCNT_INCR(txq, tso6);
   8923 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8924 		}
   8925 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8926 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8927 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8928 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8929 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8930 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8931 	} else {
   8932 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8933 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8934 	}
   8935 
   8936 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8937 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8938 		cmdc |= NQTXC_CMD_IP4;
   8939 	}
   8940 
   8941 	if (m0->m_pkthdr.csum_flags &
   8942 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8943 		WM_Q_EVCNT_INCR(txq, tusum);
   8944 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8945 			cmdc |= NQTXC_CMD_TCP;
   8946 		else
   8947 			cmdc |= NQTXC_CMD_UDP;
   8948 
   8949 		cmdc |= NQTXC_CMD_IP4;
   8950 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8951 	}
   8952 	if (m0->m_pkthdr.csum_flags &
   8953 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8954 		WM_Q_EVCNT_INCR(txq, tusum6);
   8955 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8956 			cmdc |= NQTXC_CMD_TCP;
   8957 		else
   8958 			cmdc |= NQTXC_CMD_UDP;
   8959 
   8960 		cmdc |= NQTXC_CMD_IP6;
   8961 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8962 	}
   8963 
   8964 	/*
    8965 	 * We don't have to write a context descriptor for every packet on
    8966 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
    8967 	 * I210 and I211.  Writing one context descriptor per Tx queue is
    8968 	 * enough for these controllers.
    8969 	 * Writing a context descriptor for every packet adds overhead,
    8970 	 * but it does not cause problems.
   8971 	 */
   8972 	/* Fill in the context descriptor. */
   8973 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8974 	    htole32(vl_len);
   8975 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8976 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8977 	    htole32(cmdc);
   8978 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8979 	    htole32(mssidx);
   8980 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8981 	DPRINTF(sc, WM_DEBUG_TX,
   8982 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8983 		txq->txq_next, 0, vl_len));
   8984 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8985 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8986 	txs->txs_ndesc++;
   8987 }
   8988 
   8989 /*
   8990  * wm_nq_start:		[ifnet interface function]
   8991  *
   8992  *	Start packet transmission on the interface for NEWQUEUE devices
   8993  */
   8994 static void
   8995 wm_nq_start(struct ifnet *ifp)
   8996 {
   8997 	struct wm_softc *sc = ifp->if_softc;
   8998 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8999 
   9000 #ifdef WM_MPSAFE
   9001 	KASSERT(if_is_mpsafe(ifp));
   9002 #endif
   9003 	/*
   9004 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   9005 	 */
   9006 
   9007 	mutex_enter(txq->txq_lock);
   9008 	if (!txq->txq_stopping)
   9009 		wm_nq_start_locked(ifp);
   9010 	mutex_exit(txq->txq_lock);
   9011 }
   9012 
   9013 static void
   9014 wm_nq_start_locked(struct ifnet *ifp)
   9015 {
   9016 	struct wm_softc *sc = ifp->if_softc;
   9017 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9018 
   9019 	wm_nq_send_common_locked(ifp, txq, false);
   9020 }
   9021 
   9022 static int
   9023 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   9024 {
   9025 	int qid;
   9026 	struct wm_softc *sc = ifp->if_softc;
   9027 	struct wm_txqueue *txq;
   9028 
   9029 	qid = wm_select_txqueue(ifp, m);
   9030 	txq = &sc->sc_queue[qid].wmq_txq;
   9031 
   9032 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9033 		m_freem(m);
   9034 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9035 		return ENOBUFS;
   9036 	}
   9037 
   9038 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9039 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9040 	if (m->m_flags & M_MCAST)
   9041 		if_statinc_ref(nsr, if_omcasts);
   9042 	IF_STAT_PUTREF(ifp);
   9043 
   9044 	/*
    9045 	 * There are two situations in which this mutex_tryenter() can
    9046 	 * fail at run time:
    9047 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    9048 	 *     (2) contention with deferred if_start softint (wm_handle_queue())
    9049 	 * In case (1), the last packet enqueued to txq->txq_interq is
    9050 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    9051 	 * In case (2), the last packet enqueued to txq->txq_interq is
    9052 	 * also dequeued by wm_deferred_start_locked(), so it does not get
    9053 	 * stuck, either.
   9054 	 */
   9055 	if (mutex_tryenter(txq->txq_lock)) {
   9056 		if (!txq->txq_stopping)
   9057 			wm_nq_transmit_locked(ifp, txq);
   9058 		mutex_exit(txq->txq_lock);
   9059 	}
   9060 
   9061 	return 0;
   9062 }
   9063 
   9064 static void
   9065 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9066 {
   9067 
   9068 	wm_nq_send_common_locked(ifp, txq, true);
   9069 }
   9070 
   9071 static void
   9072 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9073     bool is_transmit)
   9074 {
   9075 	struct wm_softc *sc = ifp->if_softc;
   9076 	struct mbuf *m0;
   9077 	struct wm_txsoft *txs;
   9078 	bus_dmamap_t dmamap;
   9079 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9080 	bool do_csum, sent;
   9081 	bool remap = true;
   9082 
   9083 	KASSERT(mutex_owned(txq->txq_lock));
   9084 	KASSERT(!txq->txq_stopping);
   9085 
   9086 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9087 		return;
   9088 
   9089 	if (__predict_false(wm_linkdown_discard(txq))) {
   9090 		do {
   9091 			if (is_transmit)
   9092 				m0 = pcq_get(txq->txq_interq);
   9093 			else
   9094 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   9095 			/*
    9096 			 * Count the packet as successfully sent even though
    9097 			 * it is discarded because the PHY link is down.
   9098 			 */
   9099 			if (m0 != NULL) {
   9100 				if_statinc(ifp, if_opackets);
   9101 				m_freem(m0);
   9102 			}
   9103 		} while (m0 != NULL);
   9104 		return;
   9105 	}
   9106 
   9107 	sent = false;
   9108 
   9109 	/*
   9110 	 * Loop through the send queue, setting up transmit descriptors
   9111 	 * until we drain the queue, or use up all available transmit
   9112 	 * descriptors.
   9113 	 */
   9114 	for (;;) {
   9115 		m0 = NULL;
   9116 
   9117 		/* Get a work queue entry. */
   9118 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9119 			wm_txeof(txq, UINT_MAX);
   9120 			if (txq->txq_sfree == 0) {
   9121 				DPRINTF(sc, WM_DEBUG_TX,
   9122 				    ("%s: TX: no free job descriptors\n",
   9123 					device_xname(sc->sc_dev)));
   9124 				WM_Q_EVCNT_INCR(txq, txsstall);
   9125 				break;
   9126 			}
   9127 		}
   9128 
   9129 		/* Grab a packet off the queue. */
   9130 		if (is_transmit)
   9131 			m0 = pcq_get(txq->txq_interq);
   9132 		else
   9133 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9134 		if (m0 == NULL)
   9135 			break;
   9136 
   9137 		DPRINTF(sc, WM_DEBUG_TX,
   9138 		    ("%s: TX: have packet to transmit: %p\n",
   9139 		    device_xname(sc->sc_dev), m0));
   9140 
   9141 		txs = &txq->txq_soft[txq->txq_snext];
   9142 		dmamap = txs->txs_dmamap;
   9143 
   9144 		/*
   9145 		 * Load the DMA map.  If this fails, the packet either
   9146 		 * didn't fit in the allotted number of segments, or we
   9147 		 * were short on resources.  For the too-many-segments
   9148 		 * case, we simply report an error and drop the packet,
   9149 		 * since we can't sanely copy a jumbo packet to a single
   9150 		 * buffer.
   9151 		 */
   9152 retry:
   9153 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9154 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9155 		if (__predict_false(error)) {
   9156 			if (error == EFBIG) {
   9157 				if (remap == true) {
   9158 					struct mbuf *m;
   9159 
   9160 					remap = false;
   9161 					m = m_defrag(m0, M_NOWAIT);
   9162 					if (m != NULL) {
   9163 						WM_Q_EVCNT_INCR(txq, defrag);
   9164 						m0 = m;
   9165 						goto retry;
   9166 					}
   9167 				}
   9168 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9169 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9170 				    "DMA segments, dropping...\n",
   9171 				    device_xname(sc->sc_dev));
   9172 				wm_dump_mbuf_chain(sc, m0);
   9173 				m_freem(m0);
   9174 				continue;
   9175 			}
   9176 			/* Short on resources, just stop for now. */
   9177 			DPRINTF(sc, WM_DEBUG_TX,
   9178 			    ("%s: TX: dmamap load failed: %d\n",
   9179 				device_xname(sc->sc_dev), error));
   9180 			break;
   9181 		}
   9182 
   9183 		segs_needed = dmamap->dm_nsegs;
   9184 
   9185 		/*
   9186 		 * Ensure we have enough descriptors free to describe
   9187 		 * the packet. Note, we always reserve one descriptor
   9188 		 * at the end of the ring due to the semantics of the
   9189 		 * TDT register, plus one more in the event we need
   9190 		 * to load offload context.
   9191 		 */
   9192 		if (segs_needed > txq->txq_free - 2) {
   9193 			/*
   9194 			 * Not enough free descriptors to transmit this
   9195 			 * packet.  We haven't committed anything yet,
   9196 			 * so just unload the DMA map, put the packet
    9197 			 * back on the queue, and punt. Notify the upper
   9198 			 * layer that there are no more slots left.
   9199 			 */
   9200 			DPRINTF(sc, WM_DEBUG_TX,
   9201 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9202 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9203 				segs_needed, txq->txq_free - 1));
   9204 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9205 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9206 			WM_Q_EVCNT_INCR(txq, txdstall);
   9207 			break;
   9208 		}
   9209 
   9210 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9211 
   9212 		DPRINTF(sc, WM_DEBUG_TX,
   9213 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9214 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9215 
   9216 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9217 
   9218 		/*
   9219 		 * Store a pointer to the packet so that we can free it
   9220 		 * later.
   9221 		 *
   9222 		 * Initially, we consider the number of descriptors the
    9223 		 * packet uses to be the number of DMA segments. This may be
   9224 		 * incremented by 1 if we do checksum offload (a descriptor
   9225 		 * is used to set the checksum context).
   9226 		 */
   9227 		txs->txs_mbuf = m0;
   9228 		txs->txs_firstdesc = txq->txq_next;
   9229 		txs->txs_ndesc = segs_needed;
   9230 
   9231 		/* Set up offload parameters for this packet. */
   9232 		uint32_t cmdlen, fields, dcmdlen;
   9233 		if (m0->m_pkthdr.csum_flags &
   9234 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9235 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9236 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9237 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9238 			    &do_csum);
   9239 		} else {
   9240 			do_csum = false;
   9241 			cmdlen = 0;
   9242 			fields = 0;
   9243 		}
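		/*
		 * At this point "cmdlen" and "fields" hold the offload bits
		 * for the advanced descriptor format as filled in by
		 * wm_nq_tx_offload(); in the no-offload case a legacy
		 * descriptor is used for the first segment below.
		 */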
   9244 
   9245 		/* Sync the DMA map. */
   9246 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9247 		    BUS_DMASYNC_PREWRITE);
   9248 
   9249 		/* Initialize the first transmit descriptor. */
   9250 		nexttx = txq->txq_next;
   9251 		if (!do_csum) {
   9252 			/* Set up a legacy descriptor */
   9253 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9254 			    dmamap->dm_segs[0].ds_addr);
   9255 			txq->txq_descs[nexttx].wtx_cmdlen =
   9256 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9257 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9258 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9259 			if (vlan_has_tag(m0)) {
   9260 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9261 				    htole32(WTX_CMD_VLE);
   9262 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9263 				    htole16(vlan_get_tag(m0));
   9264 			} else
   9265 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   9266 
   9267 			dcmdlen = 0;
   9268 		} else {
   9269 			/* Set up an advanced data descriptor */
   9270 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9271 			    htole64(dmamap->dm_segs[0].ds_addr);
   9272 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9273 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9274 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9275 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9276 			    htole32(fields);
   9277 			DPRINTF(sc, WM_DEBUG_TX,
   9278 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9279 				device_xname(sc->sc_dev), nexttx,
   9280 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9281 			DPRINTF(sc, WM_DEBUG_TX,
   9282 			    ("\t 0x%08x%08x\n", fields,
   9283 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9284 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9285 		}
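		/*
		 * "dcmdlen" carries the descriptor type bits (NQTX_DTYP_D |
		 * NQTX_CMD_DEXT) that must be OR'd into each following
		 * advanced data descriptor; it stays 0 for legacy
		 * descriptors.
		 */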
   9286 
   9287 		lasttx = nexttx;
   9288 		nexttx = WM_NEXTTX(txq, nexttx);
   9289 		/*
   9290 		 * Fill in the next descriptors. Legacy or advanced format
   9291 		 * is the same here.
   9292 		 */
   9293 		for (seg = 1; seg < dmamap->dm_nsegs;
   9294 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9295 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9296 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9297 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9298 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9299 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9300 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9301 			lasttx = nexttx;
   9302 
   9303 			DPRINTF(sc, WM_DEBUG_TX,
   9304 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9305 				device_xname(sc->sc_dev), nexttx,
   9306 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9307 				dmamap->dm_segs[seg].ds_len));
   9308 		}
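		/*
		 * "lasttx" now indexes the final descriptor of this frame;
		 * the single TDT write below publishes the whole chain to
		 * the hardware.
		 */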
   9309 
   9310 		KASSERT(lasttx != -1);
   9311 
   9312 		/*
   9313 		 * Set up the command byte on the last descriptor of
   9314 		 * the packet. If we're in the interrupt delay window,
   9315 		 * delay the interrupt.
   9316 		 */
   9317 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9318 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
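		/*
		 * The legacy and advanced layouts share the EOP/RS bit
		 * positions (asserted above), so updating through the legacy
		 * wtx_cmdlen view works for both.
		 */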
   9319 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9320 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9321 
   9322 		txs->txs_lastdesc = lasttx;
   9323 
   9324 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9325 		    device_xname(sc->sc_dev),
   9326 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9327 
   9328 		/* Sync the descriptors we're using. */
   9329 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9330 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9331 
   9332 		/* Give the packet to the chip. */
   9333 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9334 		sent = true;
   9335 
   9336 		DPRINTF(sc, WM_DEBUG_TX,
   9337 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9338 
   9339 		DPRINTF(sc, WM_DEBUG_TX,
   9340 		    ("%s: TX: finished transmitting packet, job %d\n",
   9341 			device_xname(sc->sc_dev), txq->txq_snext));
   9342 
   9343 		/* Advance the tx pointer. */
   9344 		txq->txq_free -= txs->txs_ndesc;
   9345 		txq->txq_next = nexttx;
   9346 
   9347 		txq->txq_sfree--;
   9348 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9349 
   9350 		/* Pass the packet to any BPF listeners. */
   9351 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9352 	}
   9353 
   9354 	if (m0 != NULL) {
   9355 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9356 		WM_Q_EVCNT_INCR(txq, descdrop);
   9357 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9358 			__func__));
   9359 		m_freem(m0);
   9360 	}
   9361 
   9362 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9363 		/* No more slots; notify upper layer. */
   9364 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9365 	}
   9366 
   9367 	if (sent) {
   9368 		/* Set a watchdog timer in case the chip flakes out. */
   9369 		txq->txq_lastsent = time_uptime;
   9370 		txq->txq_sending = true;
   9371 	}
   9372 }
   9373 
   9374 static void
   9375 wm_deferred_start_locked(struct wm_txqueue *txq)
   9376 {
   9377 	struct wm_softc *sc = txq->txq_sc;
   9378 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9379 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9380 	int qid = wmq->wmq_id;
   9381 
   9382 	KASSERT(mutex_owned(txq->txq_lock));
   9383 	KASSERT(!txq->txq_stopping);
   9384 
   9385 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9386 		/* XXX needed for ALTQ or single-CPU systems */
   9387 		if (qid == 0)
   9388 			wm_nq_start_locked(ifp);
   9389 		wm_nq_transmit_locked(ifp, txq);
   9390 	} else {
    9391 		/* XXX needed for ALTQ or single-CPU systems */
   9392 		if (qid == 0)
   9393 			wm_start_locked(ifp);
   9394 		wm_transmit_locked(ifp, txq);
   9395 	}
   9396 }
   9397 
   9398 /* Interrupt */
   9399 
   9400 /*
   9401  * wm_txeof:
   9402  *
   9403  *	Helper; handle transmit interrupts.
   9404  */
   9405 static bool
   9406 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9407 {
   9408 	struct wm_softc *sc = txq->txq_sc;
   9409 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9410 	struct wm_txsoft *txs;
   9411 	int count = 0;
   9412 	int i;
   9413 	uint8_t status;
   9414 	bool more = false;
   9415 
   9416 	KASSERT(mutex_owned(txq->txq_lock));
   9417 
   9418 	if (txq->txq_stopping)
   9419 		return false;
   9420 
   9421 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
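	/*
	 * Clear WM_TXQ_NO_SPACE up front; any descriptors reclaimed below
	 * let the start routines queue packets again.
	 */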
   9422 
   9423 	/*
   9424 	 * Go through the Tx list and free mbufs for those
   9425 	 * frames which have been transmitted.
   9426 	 */
   9427 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9428 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9429 		txs = &txq->txq_soft[i];
   9430 
   9431 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9432 			device_xname(sc->sc_dev), i));
   9433 
   9434 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9435 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9436 
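		/*
		 * Check the Descriptor Done bit of the job's last
		 * descriptor; if it is not set yet, resync the descriptor
		 * for the next poll and stop scanning.
		 */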
   9437 		status =
   9438 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9439 		if ((status & WTX_ST_DD) == 0) {
   9440 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9441 			    BUS_DMASYNC_PREREAD);
   9442 			break;
   9443 		}
   9444 
   9445 		if (limit-- == 0) {
   9446 			more = true;
   9447 			DPRINTF(sc, WM_DEBUG_TX,
   9448 			    ("%s: TX: loop limited, job %d is not processed\n",
   9449 				device_xname(sc->sc_dev), i));
   9450 			break;
   9451 		}
   9452 
   9453 		count++;
   9454 		DPRINTF(sc, WM_DEBUG_TX,
   9455 		    ("%s: TX: job %d done: descs %d..%d\n",
   9456 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9457 		    txs->txs_lastdesc));
   9458 
   9459 		/*
   9460 		 * XXX We should probably be using the statistics
   9461 		 * XXX registers, but I don't know if they exist
   9462 		 * XXX on chips before the i82544.
   9463 		 */
   9464 
   9465 #ifdef WM_EVENT_COUNTERS
   9466 		if (status & WTX_ST_TU)
   9467 			WM_Q_EVCNT_INCR(txq, underrun);
   9468 #endif /* WM_EVENT_COUNTERS */
   9469 
   9470 		/*
    9471 		 * The datasheets for 82574 and newer say the status field
    9472 		 * has neither the EC (Excessive Collision) bit nor the LC
    9473 		 * (Late Collision) bit (both are reserved). Refer to the
    9474 		 * "PCIe GbE Controller Open Source Software Developer's
    9475 		 * Manual", the 82574 datasheet and newer ones.
    9476 		 *
    9477 		 * XXX The LC bit was seen set on an I218 even on full-duplex
    9478 		 * media, so the bit might have another, undocumented meaning.
   9479 		 */
   9480 
   9481 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9482 		    && ((sc->sc_type < WM_T_82574)
   9483 			|| (sc->sc_type == WM_T_80003))) {
   9484 			if_statinc(ifp, if_oerrors);
   9485 			if (status & WTX_ST_LC)
   9486 				log(LOG_WARNING, "%s: late collision\n",
   9487 				    device_xname(sc->sc_dev));
   9488 			else if (status & WTX_ST_EC) {
   9489 				if_statadd(ifp, if_collisions,
   9490 				    TX_COLLISION_THRESHOLD + 1);
   9491 				log(LOG_WARNING, "%s: excessive collisions\n",
   9492 				    device_xname(sc->sc_dev));
   9493 			}
   9494 		} else
   9495 			if_statinc(ifp, if_opackets);
   9496 
   9497 		txq->txq_packets++;
   9498 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9499 
   9500 		txq->txq_free += txs->txs_ndesc;
   9501 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9502 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9503 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9504 		m_freem(txs->txs_mbuf);
   9505 		txs->txs_mbuf = NULL;
   9506 	}
   9507 
   9508 	/* Update the dirty transmit buffer pointer. */
   9509 	txq->txq_sdirty = i;
   9510 	DPRINTF(sc, WM_DEBUG_TX,
   9511 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9512 
   9513 	if (count != 0)
   9514 		rnd_add_uint32(&sc->rnd_source, count);
   9515 
   9516 	/*
   9517 	 * If there are no more pending transmissions, cancel the watchdog
   9518 	 * timer.
   9519 	 */
   9520 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9521 		txq->txq_sending = false;
   9522 
   9523 	return more;
   9524 }
   9525 
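/*
 * The wm_rxdesc_get_* accessors below hide the three receive descriptor
 * layouts from the rest of the driver: the legacy format, the 82574
 * extended format and the "newqueue" (82575 and newer) advanced format.
 */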
   9526 static inline uint32_t
   9527 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9528 {
   9529 	struct wm_softc *sc = rxq->rxq_sc;
   9530 
   9531 	if (sc->sc_type == WM_T_82574)
   9532 		return EXTRXC_STATUS(
   9533 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9534 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9535 		return NQRXC_STATUS(
   9536 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9537 	else
   9538 		return rxq->rxq_descs[idx].wrx_status;
   9539 }
   9540 
   9541 static inline uint32_t
   9542 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9543 {
   9544 	struct wm_softc *sc = rxq->rxq_sc;
   9545 
   9546 	if (sc->sc_type == WM_T_82574)
   9547 		return EXTRXC_ERROR(
   9548 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9549 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9550 		return NQRXC_ERROR(
   9551 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9552 	else
   9553 		return rxq->rxq_descs[idx].wrx_errors;
   9554 }
   9555 
   9556 static inline uint16_t
   9557 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9558 {
   9559 	struct wm_softc *sc = rxq->rxq_sc;
   9560 
   9561 	if (sc->sc_type == WM_T_82574)
   9562 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9563 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9564 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9565 	else
   9566 		return rxq->rxq_descs[idx].wrx_special;
   9567 }
   9568 
   9569 static inline int
   9570 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9571 {
   9572 	struct wm_softc *sc = rxq->rxq_sc;
   9573 
   9574 	if (sc->sc_type == WM_T_82574)
   9575 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9576 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9577 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9578 	else
   9579 		return rxq->rxq_descs[idx].wrx_len;
   9580 }
   9581 
   9582 #ifdef WM_DEBUG
   9583 static inline uint32_t
   9584 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9585 {
   9586 	struct wm_softc *sc = rxq->rxq_sc;
   9587 
   9588 	if (sc->sc_type == WM_T_82574)
   9589 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9590 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9591 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9592 	else
   9593 		return 0;
   9594 }
   9595 
   9596 static inline uint8_t
   9597 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9598 {
   9599 	struct wm_softc *sc = rxq->rxq_sc;
   9600 
   9601 	if (sc->sc_type == WM_T_82574)
   9602 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9603 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9604 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9605 	else
   9606 		return 0;
   9607 }
   9608 #endif /* WM_DEBUG */
   9609 
   9610 static inline bool
   9611 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9612     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9613 {
   9614 
   9615 	if (sc->sc_type == WM_T_82574)
   9616 		return (status & ext_bit) != 0;
   9617 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9618 		return (status & nq_bit) != 0;
   9619 	else
   9620 		return (status & legacy_bit) != 0;
   9621 }
   9622 
   9623 static inline bool
   9624 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9625     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9626 {
   9627 
   9628 	if (sc->sc_type == WM_T_82574)
   9629 		return (error & ext_bit) != 0;
   9630 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9631 		return (error & nq_bit) != 0;
   9632 	else
   9633 		return (error & legacy_bit) != 0;
   9634 }
   9635 
   9636 static inline bool
   9637 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9638 {
   9639 
   9640 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9641 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9642 		return true;
   9643 	else
   9644 		return false;
   9645 }
   9646 
   9647 static inline bool
   9648 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9649 {
   9650 	struct wm_softc *sc = rxq->rxq_sc;
   9651 
   9652 	/* XXX missing error bit for newqueue? */
   9653 	if (wm_rxdesc_is_set_error(sc, errors,
   9654 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9655 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9656 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9657 		NQRXC_ERROR_RXE)) {
   9658 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9659 		    EXTRXC_ERROR_SE, 0))
   9660 			log(LOG_WARNING, "%s: symbol error\n",
   9661 			    device_xname(sc->sc_dev));
   9662 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9663 		    EXTRXC_ERROR_SEQ, 0))
   9664 			log(LOG_WARNING, "%s: receive sequence error\n",
   9665 			    device_xname(sc->sc_dev));
   9666 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9667 		    EXTRXC_ERROR_CE, 0))
   9668 			log(LOG_WARNING, "%s: CRC error\n",
   9669 			    device_xname(sc->sc_dev));
   9670 		return true;
   9671 	}
   9672 
   9673 	return false;
   9674 }
   9675 
   9676 static inline bool
   9677 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9678 {
   9679 	struct wm_softc *sc = rxq->rxq_sc;
   9680 
   9681 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9682 		NQRXC_STATUS_DD)) {
   9683 		/* We have processed all of the receive descriptors. */
   9684 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9685 		return false;
   9686 	}
   9687 
   9688 	return true;
   9689 }
   9690 
   9691 static inline bool
   9692 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9693     uint16_t vlantag, struct mbuf *m)
   9694 {
   9695 
   9696 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9697 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9698 		vlan_set_tag(m, le16toh(vlantag));
   9699 	}
   9700 
   9701 	return true;
   9702 }
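/*
 * Note that wm_rxdesc_input_vlantag() currently always returns true;
 * the boolean return value seems to exist so that a caller could drop
 * the frame in the future.
 */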
   9703 
   9704 static inline void
   9705 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9706     uint32_t errors, struct mbuf *m)
   9707 {
   9708 	struct wm_softc *sc = rxq->rxq_sc;
   9709 
   9710 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9711 		if (wm_rxdesc_is_set_status(sc, status,
   9712 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9713 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9714 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9715 			if (wm_rxdesc_is_set_error(sc, errors,
   9716 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9717 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9718 		}
   9719 		if (wm_rxdesc_is_set_status(sc, status,
   9720 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9721 			/*
   9722 			 * Note: we don't know if this was TCP or UDP,
   9723 			 * so we just set both bits, and expect the
   9724 			 * upper layers to deal.
   9725 			 */
   9726 			WM_Q_EVCNT_INCR(rxq, tusum);
   9727 			m->m_pkthdr.csum_flags |=
   9728 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9729 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9730 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9731 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9732 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9733 		}
   9734 	}
   9735 }
   9736 
   9737 /*
   9738  * wm_rxeof:
   9739  *
   9740  *	Helper; handle receive interrupts.
   9741  */
   9742 static bool
   9743 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9744 {
   9745 	struct wm_softc *sc = rxq->rxq_sc;
   9746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9747 	struct wm_rxsoft *rxs;
   9748 	struct mbuf *m;
   9749 	int i, len;
   9750 	int count = 0;
   9751 	uint32_t status, errors;
   9752 	uint16_t vlantag;
   9753 	bool more = false;
   9754 
   9755 	KASSERT(mutex_owned(rxq->rxq_lock));
   9756 
   9757 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9758 		rxs = &rxq->rxq_soft[i];
   9759 
   9760 		DPRINTF(sc, WM_DEBUG_RX,
   9761 		    ("%s: RX: checking descriptor %d\n",
   9762 			device_xname(sc->sc_dev), i));
   9763 		wm_cdrxsync(rxq, i,
   9764 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9765 
   9766 		status = wm_rxdesc_get_status(rxq, i);
   9767 		errors = wm_rxdesc_get_errors(rxq, i);
   9768 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9769 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9770 #ifdef WM_DEBUG
   9771 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9772 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9773 #endif
   9774 
   9775 		if (!wm_rxdesc_dd(rxq, i, status))
   9776 			break;
   9777 
   9778 		if (limit-- == 0) {
   9779 			more = true;
   9780 			DPRINTF(sc, WM_DEBUG_RX,
   9781 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9782 				device_xname(sc->sc_dev), i));
   9783 			break;
   9784 		}
   9785 
   9786 		count++;
   9787 		if (__predict_false(rxq->rxq_discard)) {
   9788 			DPRINTF(sc, WM_DEBUG_RX,
   9789 			    ("%s: RX: discarding contents of descriptor %d\n",
   9790 				device_xname(sc->sc_dev), i));
   9791 			wm_init_rxdesc(rxq, i);
   9792 			if (wm_rxdesc_is_eop(rxq, status)) {
   9793 				/* Reset our state. */
   9794 				DPRINTF(sc, WM_DEBUG_RX,
   9795 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9796 					device_xname(sc->sc_dev)));
   9797 				rxq->rxq_discard = 0;
   9798 			}
   9799 			continue;
   9800 		}
   9801 
   9802 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9803 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9804 
   9805 		m = rxs->rxs_mbuf;
   9806 
   9807 		/*
   9808 		 * Add a new receive buffer to the ring, unless of
   9809 		 * course the length is zero. Treat the latter as a
   9810 		 * failed mapping.
   9811 		 */
   9812 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9813 			/*
   9814 			 * Failed, throw away what we've done so
   9815 			 * far, and discard the rest of the packet.
   9816 			 */
   9817 			if_statinc(ifp, if_ierrors);
   9818 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9819 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9820 			wm_init_rxdesc(rxq, i);
   9821 			if (!wm_rxdesc_is_eop(rxq, status))
   9822 				rxq->rxq_discard = 1;
   9823 			if (rxq->rxq_head != NULL)
   9824 				m_freem(rxq->rxq_head);
   9825 			WM_RXCHAIN_RESET(rxq);
   9826 			DPRINTF(sc, WM_DEBUG_RX,
   9827 			    ("%s: RX: Rx buffer allocation failed, "
   9828 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9829 				rxq->rxq_discard ? " (discard)" : ""));
   9830 			continue;
   9831 		}
   9832 
   9833 		m->m_len = len;
   9834 		rxq->rxq_len += len;
   9835 		DPRINTF(sc, WM_DEBUG_RX,
   9836 		    ("%s: RX: buffer at %p len %d\n",
   9837 			device_xname(sc->sc_dev), m->m_data, len));
   9838 
   9839 		/* If this is not the end of the packet, keep looking. */
   9840 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9841 			WM_RXCHAIN_LINK(rxq, m);
   9842 			DPRINTF(sc, WM_DEBUG_RX,
   9843 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9844 				device_xname(sc->sc_dev), rxq->rxq_len));
   9845 			continue;
   9846 		}
   9847 
   9848 		/*
    9849 		 * Okay, we have the entire packet now. The chip is
    9850 		 * configured to include the FCS (not all chips can be
    9851 		 * configured to strip it), so we need to trim it -- except on
    9852 		 * I35[04] and I21[01], whose errata leaves RCTL_SECRC always
    9853 		 * set in the RCTL register, so the FCS is already stripped.
    9854 		 * PCH2 and newer chips also don't include the FCS when jumbo
    9855 		 * frames are used, as a workaround for an errata. We may need
    9856 		 * to adjust the length of the previous mbuf in the chain if
    9857 		 * the current mbuf is too short.
   9858 		 */
   9859 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9860 			if (m->m_len < ETHER_CRC_LEN) {
   9861 				rxq->rxq_tail->m_len
   9862 				    -= (ETHER_CRC_LEN - m->m_len);
   9863 				m->m_len = 0;
   9864 			} else
   9865 				m->m_len -= ETHER_CRC_LEN;
   9866 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9867 		} else
   9868 			len = rxq->rxq_len;
   9869 
   9870 		WM_RXCHAIN_LINK(rxq, m);
   9871 
   9872 		*rxq->rxq_tailp = NULL;
   9873 		m = rxq->rxq_head;
   9874 
   9875 		WM_RXCHAIN_RESET(rxq);
   9876 
   9877 		DPRINTF(sc, WM_DEBUG_RX,
   9878 		    ("%s: RX: have entire packet, len -> %d\n",
   9879 			device_xname(sc->sc_dev), len));
   9880 
   9881 		/* If an error occurred, update stats and drop the packet. */
   9882 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9883 			m_freem(m);
   9884 			continue;
   9885 		}
   9886 
   9887 		/* No errors.  Receive the packet. */
   9888 		m_set_rcvif(m, ifp);
   9889 		m->m_pkthdr.len = len;
   9890 		/*
   9891 		 * TODO
    9892 		 * We should save the rsshash and rsstype in this mbuf.
   9893 		 */
   9894 		DPRINTF(sc, WM_DEBUG_RX,
   9895 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9896 			device_xname(sc->sc_dev), rsstype, rsshash));
   9897 
   9898 		/*
   9899 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9900 		 * for us.  Associate the tag with the packet.
   9901 		 */
   9902 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9903 			continue;
   9904 
   9905 		/* Set up checksum info for this packet. */
   9906 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9907 
   9908 		rxq->rxq_packets++;
   9909 		rxq->rxq_bytes += len;
   9910 		/* Pass it on. */
   9911 		if_percpuq_enqueue(sc->sc_ipq, m);
   9912 
   9913 		if (rxq->rxq_stopping)
   9914 			break;
   9915 	}
   9916 	rxq->rxq_ptr = i;
   9917 
   9918 	if (count != 0)
   9919 		rnd_add_uint32(&sc->rnd_source, count);
   9920 
   9921 	DPRINTF(sc, WM_DEBUG_RX,
   9922 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9923 
   9924 	return more;
   9925 }
   9926 
   9927 /*
   9928  * wm_linkintr_gmii:
   9929  *
   9930  *	Helper; handle link interrupts for GMII.
   9931  */
   9932 static void
   9933 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9934 {
   9935 	device_t dev = sc->sc_dev;
   9936 	uint32_t status, reg;
   9937 	bool link;
   9938 	int rv;
   9939 
   9940 	KASSERT(WM_CORE_LOCKED(sc));
   9941 
   9942 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9943 		__func__));
   9944 
   9945 	if ((icr & ICR_LSC) == 0) {
   9946 		if (icr & ICR_RXSEQ)
   9947 			DPRINTF(sc, WM_DEBUG_LINK,
    9948 			    ("%s: LINK: Receive sequence error\n",
   9949 				device_xname(dev)));
   9950 		return;
   9951 	}
   9952 
   9953 	/* Link status changed */
   9954 	status = CSR_READ(sc, WMREG_STATUS);
   9955 	link = status & STATUS_LU;
   9956 	if (link) {
   9957 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9958 			device_xname(dev),
   9959 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9960 		if (wm_phy_need_linkdown_discard(sc)) {
   9961 			DPRINTF(sc, WM_DEBUG_LINK,
   9962 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9963 				device_xname(dev)));
   9964 			wm_clear_linkdown_discard(sc);
   9965 		}
   9966 	} else {
   9967 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9968 			device_xname(dev)));
   9969 		if (wm_phy_need_linkdown_discard(sc)) {
   9970 			DPRINTF(sc, WM_DEBUG_LINK,
   9971 			    ("%s: linkintr: Set linkdown discard flag\n",
   9972 				device_xname(dev)));
   9973 			wm_set_linkdown_discard(sc);
   9974 		}
   9975 	}
   9976 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9977 		wm_gig_downshift_workaround_ich8lan(sc);
   9978 
   9979 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   9980 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9981 
   9982 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9983 		device_xname(dev)));
   9984 	mii_pollstat(&sc->sc_mii);
   9985 	if (sc->sc_type == WM_T_82543) {
   9986 		int miistatus, active;
   9987 
   9988 		/*
   9989 		 * With 82543, we need to force speed and
   9990 		 * duplex on the MAC equal to what the PHY
   9991 		 * speed and duplex configuration is.
   9992 		 */
   9993 		miistatus = sc->sc_mii.mii_media_status;
   9994 
   9995 		if (miistatus & IFM_ACTIVE) {
   9996 			active = sc->sc_mii.mii_media_active;
   9997 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9998 			switch (IFM_SUBTYPE(active)) {
   9999 			case IFM_10_T:
   10000 				sc->sc_ctrl |= CTRL_SPEED_10;
   10001 				break;
   10002 			case IFM_100_TX:
   10003 				sc->sc_ctrl |= CTRL_SPEED_100;
   10004 				break;
   10005 			case IFM_1000_T:
   10006 				sc->sc_ctrl |= CTRL_SPEED_1000;
   10007 				break;
   10008 			default:
   10009 				/*
   10010 				 * Fiber?
    10011 				 * Should not enter here.
   10012 				 */
   10013 				device_printf(dev, "unknown media (%x)\n",
   10014 				    active);
   10015 				break;
   10016 			}
   10017 			if (active & IFM_FDX)
   10018 				sc->sc_ctrl |= CTRL_FD;
   10019 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10020 		}
   10021 	} else if (sc->sc_type == WM_T_PCH) {
   10022 		wm_k1_gig_workaround_hv(sc,
   10023 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10024 	}
   10025 
   10026 	/*
   10027 	 * When connected at 10Mbps half-duplex, some parts are excessively
    10028 	 * aggressive, resulting in many collisions. To avoid this, increase
   10029 	 * the IPG and reduce Rx latency in the PHY.
   10030 	 */
   10031 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   10032 	    && link) {
   10033 		uint32_t tipg_reg;
   10034 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10035 		bool fdx;
   10036 		uint16_t emi_addr, emi_val;
   10037 
   10038 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10039 		tipg_reg &= ~TIPG_IPGT_MASK;
   10040 		fdx = status & STATUS_FD;
   10041 
   10042 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10043 			tipg_reg |= 0xff;
   10044 			/* Reduce Rx latency in analog PHY */
   10045 			emi_val = 0;
   10046 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10047 		    fdx && speed != STATUS_SPEED_1000) {
   10048 			tipg_reg |= 0xc;
   10049 			emi_val = 1;
   10050 		} else {
   10051 			/* Roll back the default values */
   10052 			tipg_reg |= 0x08;
   10053 			emi_val = 1;
   10054 		}
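		/*
		 * The IPGT values used above (0xff, 0xc and the 0x08
		 * default) appear to mirror the equivalent 10/100 Mbps
		 * workaround in other e1000-family drivers.
		 */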
   10055 
   10056 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10057 
   10058 		rv = sc->phy.acquire(sc);
   10059 		if (rv)
   10060 			return;
   10061 
   10062 		if (sc->sc_type == WM_T_PCH2)
   10063 			emi_addr = I82579_RX_CONFIG;
   10064 		else
   10065 			emi_addr = I217_RX_CONFIG;
   10066 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10067 
   10068 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10069 			uint16_t phy_reg;
   10070 
   10071 			sc->phy.readreg_locked(dev, 2,
   10072 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10073 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10074 			if (speed == STATUS_SPEED_100
   10075 			    || speed == STATUS_SPEED_10)
   10076 				phy_reg |= 0x3e8;
   10077 			else
   10078 				phy_reg |= 0xfa;
   10079 			sc->phy.writereg_locked(dev, 2,
   10080 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10081 
   10082 			if (speed == STATUS_SPEED_1000) {
   10083 				sc->phy.readreg_locked(dev, 2,
   10084 				    HV_PM_CTRL, &phy_reg);
   10085 
   10086 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10087 
   10088 				sc->phy.writereg_locked(dev, 2,
   10089 				    HV_PM_CTRL, phy_reg);
   10090 			}
   10091 		}
   10092 		sc->phy.release(sc);
   10093 
   10094 		if (rv)
   10095 			return;
   10096 
   10097 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10098 			uint16_t data, ptr_gap;
   10099 
   10100 			if (speed == STATUS_SPEED_1000) {
   10101 				rv = sc->phy.acquire(sc);
   10102 				if (rv)
   10103 					return;
   10104 
   10105 				rv = sc->phy.readreg_locked(dev, 2,
   10106 				    I82579_UNKNOWN1, &data);
   10107 				if (rv) {
   10108 					sc->phy.release(sc);
   10109 					return;
   10110 				}
   10111 
   10112 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10113 				if (ptr_gap < 0x18) {
   10114 					data &= ~(0x3ff << 2);
   10115 					data |= (0x18 << 2);
   10116 					rv = sc->phy.writereg_locked(dev,
   10117 					    2, I82579_UNKNOWN1, data);
   10118 				}
   10119 				sc->phy.release(sc);
   10120 				if (rv)
   10121 					return;
   10122 			} else {
   10123 				rv = sc->phy.acquire(sc);
   10124 				if (rv)
   10125 					return;
   10126 
   10127 				rv = sc->phy.writereg_locked(dev, 2,
   10128 				    I82579_UNKNOWN1, 0xc023);
   10129 				sc->phy.release(sc);
   10130 				if (rv)
   10131 					return;
   10132 
   10133 			}
   10134 		}
   10135 	}
   10136 
   10137 	/*
   10138 	 * I217 Packet Loss issue:
   10139 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   10140 	 * on power up.
   10141 	 * Set the Beacon Duration for I217 to 8 usec
   10142 	 */
   10143 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10144 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10145 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10146 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10147 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10148 	}
   10149 
   10150 	/* Work-around I218 hang issue */
   10151 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10152 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10153 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10154 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10155 		wm_k1_workaround_lpt_lp(sc, link);
   10156 
   10157 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10158 		/*
   10159 		 * Set platform power management values for Latency
   10160 		 * Tolerance Reporting (LTR)
   10161 		 */
   10162 		wm_platform_pm_pch_lpt(sc,
   10163 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10164 	}
   10165 
   10166 	/* Clear link partner's EEE ability */
   10167 	sc->eee_lp_ability = 0;
   10168 
   10169 	/* FEXTNVM6 K1-off workaround */
   10170 	if (sc->sc_type == WM_T_PCH_SPT) {
   10171 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10172 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10173 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10174 		else
   10175 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10176 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10177 	}
   10178 
   10179 	if (!link)
   10180 		return;
   10181 
   10182 	switch (sc->sc_type) {
   10183 	case WM_T_PCH2:
   10184 		wm_k1_workaround_lv(sc);
   10185 		/* FALLTHROUGH */
   10186 	case WM_T_PCH:
   10187 		if (sc->sc_phytype == WMPHY_82578)
   10188 			wm_link_stall_workaround_hv(sc);
   10189 		break;
   10190 	default:
   10191 		break;
   10192 	}
   10193 
   10194 	/* Enable/Disable EEE after link up */
   10195 	if (sc->sc_phytype > WMPHY_82579)
   10196 		wm_set_eee_pchlan(sc);
   10197 }
   10198 
   10199 /*
   10200  * wm_linkintr_tbi:
   10201  *
   10202  *	Helper; handle link interrupts for TBI mode.
   10203  */
   10204 static void
   10205 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10206 {
   10207 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10208 	uint32_t status;
   10209 
   10210 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10211 		__func__));
   10212 
   10213 	status = CSR_READ(sc, WMREG_STATUS);
   10214 	if (icr & ICR_LSC) {
   10215 		wm_check_for_link(sc);
   10216 		if (status & STATUS_LU) {
   10217 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10218 				device_xname(sc->sc_dev),
   10219 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10220 			/*
   10221 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10222 			 * so we should update sc->sc_ctrl
   10223 			 */
   10224 
   10225 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10226 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10227 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10228 			if (status & STATUS_FD)
   10229 				sc->sc_tctl |=
   10230 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10231 			else
   10232 				sc->sc_tctl |=
   10233 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10234 			if (sc->sc_ctrl & CTRL_TFCE)
   10235 				sc->sc_fcrtl |= FCRTL_XONE;
   10236 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10237 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10238 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10239 			sc->sc_tbi_linkup = 1;
   10240 			if_link_state_change(ifp, LINK_STATE_UP);
   10241 		} else {
   10242 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10243 				device_xname(sc->sc_dev)));
   10244 			sc->sc_tbi_linkup = 0;
   10245 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10246 		}
   10247 		/* Update LED */
   10248 		wm_tbi_serdes_set_linkled(sc);
   10249 	} else if (icr & ICR_RXSEQ)
   10250 		DPRINTF(sc, WM_DEBUG_LINK,
   10251 		    ("%s: LINK: Receive sequence error\n",
   10252 			device_xname(sc->sc_dev)));
   10253 }
   10254 
   10255 /*
   10256  * wm_linkintr_serdes:
   10257  *
    10258  *	Helper; handle link interrupts for SERDES mode.
   10259  */
   10260 static void
   10261 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10262 {
   10263 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10264 	struct mii_data *mii = &sc->sc_mii;
   10265 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10266 	uint32_t pcs_adv, pcs_lpab, reg;
   10267 
   10268 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10269 		__func__));
   10270 
   10271 	if (icr & ICR_LSC) {
   10272 		/* Check PCS */
   10273 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10274 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10275 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10276 				device_xname(sc->sc_dev)));
   10277 			mii->mii_media_status |= IFM_ACTIVE;
   10278 			sc->sc_tbi_linkup = 1;
   10279 			if_link_state_change(ifp, LINK_STATE_UP);
   10280 		} else {
   10281 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10282 				device_xname(sc->sc_dev)));
   10283 			mii->mii_media_status |= IFM_NONE;
   10284 			sc->sc_tbi_linkup = 0;
   10285 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10286 			wm_tbi_serdes_set_linkled(sc);
   10287 			return;
   10288 		}
   10289 		mii->mii_media_active |= IFM_1000_SX;
   10290 		if ((reg & PCS_LSTS_FDX) != 0)
   10291 			mii->mii_media_active |= IFM_FDX;
   10292 		else
   10293 			mii->mii_media_active |= IFM_HDX;
   10294 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10295 			/* Check flow */
   10296 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10297 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10298 				DPRINTF(sc, WM_DEBUG_LINK,
   10299 				    ("XXX LINKOK but not ACOMP\n"));
   10300 				return;
   10301 			}
   10302 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10303 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10304 			DPRINTF(sc, WM_DEBUG_LINK,
   10305 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10306 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10307 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10308 				mii->mii_media_active |= IFM_FLOW
   10309 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10310 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10311 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10312 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10313 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10314 				mii->mii_media_active |= IFM_FLOW
   10315 				    | IFM_ETH_TXPAUSE;
   10316 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10317 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10318 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10319 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10320 				mii->mii_media_active |= IFM_FLOW
   10321 				    | IFM_ETH_RXPAUSE;
   10322 		}
   10323 		/* Update LED */
   10324 		wm_tbi_serdes_set_linkled(sc);
   10325 	} else
   10326 		DPRINTF(sc, WM_DEBUG_LINK,
   10327 		    ("%s: LINK: Receive sequence error\n",
   10328 		    device_xname(sc->sc_dev)));
   10329 }
   10330 
   10331 /*
   10332  * wm_linkintr:
   10333  *
   10334  *	Helper; handle link interrupts.
   10335  */
   10336 static void
   10337 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10338 {
   10339 
   10340 	KASSERT(WM_CORE_LOCKED(sc));
   10341 
   10342 	if (sc->sc_flags & WM_F_HAS_MII)
   10343 		wm_linkintr_gmii(sc, icr);
   10344 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10345 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10346 		wm_linkintr_serdes(sc, icr);
   10347 	else
   10348 		wm_linkintr_tbi(sc, icr);
   10349 }
   10350 
   10351 
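/*
 * wm_sched_handle_queue:
 *
 *	Defer the Tx/Rx handler either to a per-CPU workqueue thread or
 *	to a softint, depending on wmq_txrx_use_workqueue; callers latch
 *	that flag from sc_txrx_use_workqueue just before rescheduling.
 */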
   10352 static inline void
   10353 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10354 {
   10355 
   10356 	if (wmq->wmq_txrx_use_workqueue)
   10357 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   10358 	else
   10359 		softint_schedule(wmq->wmq_si);
   10360 }
   10361 
   10362 static inline void
   10363 wm_legacy_intr_disable(struct wm_softc *sc)
   10364 {
   10365 
   10366 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10367 }
   10368 
   10369 static inline void
   10370 wm_legacy_intr_enable(struct wm_softc *sc)
   10371 {
   10372 
   10373 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10374 }
   10375 
   10376 /*
   10377  * wm_intr_legacy:
   10378  *
   10379  *	Interrupt service routine for INTx and MSI.
   10380  */
   10381 static int
   10382 wm_intr_legacy(void *arg)
   10383 {
   10384 	struct wm_softc *sc = arg;
   10385 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10386 	struct wm_queue *wmq = &sc->sc_queue[0];
   10387 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10388 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10389 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10390 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10391 	uint32_t icr, rndval = 0;
   10392 	bool more = false;
   10393 
   10394 	icr = CSR_READ(sc, WMREG_ICR);
   10395 	if ((icr & sc->sc_icr) == 0)
   10396 		return 0;
   10397 
   10398 	DPRINTF(sc, WM_DEBUG_TX,
    10399 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   10400 	if (rndval == 0)
   10401 		rndval = icr;
   10402 
   10403 	mutex_enter(txq->txq_lock);
   10404 
   10405 	if (txq->txq_stopping) {
   10406 		mutex_exit(txq->txq_lock);
   10407 		return 1;
   10408 	}
   10409 
   10410 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10411 	if (icr & ICR_TXDW) {
   10412 		DPRINTF(sc, WM_DEBUG_TX,
   10413 		    ("%s: TX: got TXDW interrupt\n",
   10414 			device_xname(sc->sc_dev)));
   10415 		WM_Q_EVCNT_INCR(txq, txdw);
   10416 	}
   10417 #endif
   10418 	if (txlimit > 0) {
   10419 		more |= wm_txeof(txq, txlimit);
   10420 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10421 			more = true;
   10422 	} else
   10423 		more = true;
   10424 	mutex_exit(txq->txq_lock);
   10425 
   10426 	mutex_enter(rxq->rxq_lock);
   10427 
   10428 	if (rxq->rxq_stopping) {
   10429 		mutex_exit(rxq->rxq_lock);
   10430 		return 1;
   10431 	}
   10432 
   10433 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10434 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10435 		DPRINTF(sc, WM_DEBUG_RX,
   10436 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10437 			device_xname(sc->sc_dev),
   10438 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10439 		WM_Q_EVCNT_INCR(rxq, intr);
   10440 	}
   10441 #endif
   10442 	if (rxlimit > 0) {
   10443 		/*
   10444 		 * wm_rxeof() does *not* call upper layer functions directly,
    10445 		 * as if_percpuq_enqueue() just calls softint_schedule().
   10446 		 * So, we can call wm_rxeof() in interrupt context.
   10447 		 */
   10448 		more = wm_rxeof(rxq, rxlimit);
   10449 	} else
   10450 		more = true;
   10451 
   10452 	mutex_exit(rxq->rxq_lock);
   10453 
   10454 	WM_CORE_LOCK(sc);
   10455 
   10456 	if (sc->sc_core_stopping) {
   10457 		WM_CORE_UNLOCK(sc);
   10458 		return 1;
   10459 	}
   10460 
   10461 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10462 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10463 		wm_linkintr(sc, icr);
   10464 	}
   10465 	if ((icr & ICR_GPI(0)) != 0)
   10466 		device_printf(sc->sc_dev, "got module interrupt\n");
   10467 
   10468 	WM_CORE_UNLOCK(sc);
   10469 
   10470 	if (icr & ICR_RXO) {
   10471 #if defined(WM_DEBUG)
   10472 		log(LOG_WARNING, "%s: Receive overrun\n",
   10473 		    device_xname(sc->sc_dev));
   10474 #endif /* defined(WM_DEBUG) */
   10475 	}
   10476 
   10477 	rnd_add_uint32(&sc->rnd_source, rndval);
   10478 
   10479 	if (more) {
   10480 		/* Try to get more packets going. */
   10481 		wm_legacy_intr_disable(sc);
   10482 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10483 		wm_sched_handle_queue(sc, wmq);
   10484 	}
   10485 
   10486 	return 1;
   10487 }
   10488 
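/*
 * wm_txrxintr_disable / wm_txrxintr_enable:
 *
 *	Mask/unmask this queue's interrupt: the 82574 uses ICR_TXQ/ICR_RXQ
 *	bits in IMC/IMS, the 82575 uses EITR_*_QUEUE bits in EIMC/EIMS and
 *	newer MSI-X chips use one EIMC/EIMS bit per vector.
 */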
   10489 static inline void
   10490 wm_txrxintr_disable(struct wm_queue *wmq)
   10491 {
   10492 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10493 
   10494 	if (__predict_false(!wm_is_using_msix(sc))) {
   10495 		wm_legacy_intr_disable(sc);
   10496 		return;
   10497 	}
   10498 
   10499 	if (sc->sc_type == WM_T_82574)
   10500 		CSR_WRITE(sc, WMREG_IMC,
   10501 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10502 	else if (sc->sc_type == WM_T_82575)
   10503 		CSR_WRITE(sc, WMREG_EIMC,
   10504 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10505 	else
   10506 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10507 }
   10508 
   10509 static inline void
   10510 wm_txrxintr_enable(struct wm_queue *wmq)
   10511 {
   10512 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10513 
   10514 	wm_itrs_calculate(sc, wmq);
   10515 
   10516 	if (__predict_false(!wm_is_using_msix(sc))) {
   10517 		wm_legacy_intr_enable(sc);
   10518 		return;
   10519 	}
   10520 
   10521 	/*
    10522 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    10523 	 * here. There is no need to care which of RXQ(0) and RXQ(1)
    10524 	 * enables ICR_OTHER first, because each RXQ/TXQ interrupt is
    10525 	 * disabled while its wm_handle_queue(wmq) is running.
   10526 	 */
   10527 	if (sc->sc_type == WM_T_82574)
   10528 		CSR_WRITE(sc, WMREG_IMS,
   10529 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10530 	else if (sc->sc_type == WM_T_82575)
   10531 		CSR_WRITE(sc, WMREG_EIMS,
   10532 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10533 	else
   10534 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10535 }
   10536 
   10537 static int
   10538 wm_txrxintr_msix(void *arg)
   10539 {
   10540 	struct wm_queue *wmq = arg;
   10541 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10542 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10543 	struct wm_softc *sc = txq->txq_sc;
   10544 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10545 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10546 	bool txmore;
   10547 	bool rxmore;
   10548 
   10549 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10550 
   10551 	DPRINTF(sc, WM_DEBUG_TX,
   10552 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10553 
   10554 	wm_txrxintr_disable(wmq);
   10555 
   10556 	mutex_enter(txq->txq_lock);
   10557 
   10558 	if (txq->txq_stopping) {
   10559 		mutex_exit(txq->txq_lock);
   10560 		return 1;
   10561 	}
   10562 
   10563 	WM_Q_EVCNT_INCR(txq, txdw);
   10564 	if (txlimit > 0) {
   10565 		txmore = wm_txeof(txq, txlimit);
    10566 		/* wm_deferred_start() is done in wm_handle_queue(). */
   10567 	} else
   10568 		txmore = true;
   10569 	mutex_exit(txq->txq_lock);
   10570 
   10571 	DPRINTF(sc, WM_DEBUG_RX,
   10572 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10573 	mutex_enter(rxq->rxq_lock);
   10574 
   10575 	if (rxq->rxq_stopping) {
   10576 		mutex_exit(rxq->rxq_lock);
   10577 		return 1;
   10578 	}
   10579 
   10580 	WM_Q_EVCNT_INCR(rxq, intr);
   10581 	if (rxlimit > 0) {
   10582 		rxmore = wm_rxeof(rxq, rxlimit);
   10583 	} else
   10584 		rxmore = true;
   10585 	mutex_exit(rxq->rxq_lock);
   10586 
   10587 	wm_itrs_writereg(sc, wmq);
   10588 
   10589 	if (txmore || rxmore) {
   10590 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10591 		wm_sched_handle_queue(sc, wmq);
   10592 	} else
   10593 		wm_txrxintr_enable(wmq);
   10594 
   10595 	return 1;
   10596 }
   10597 
   10598 static void
   10599 wm_handle_queue(void *arg)
   10600 {
   10601 	struct wm_queue *wmq = arg;
   10602 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10603 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10604 	struct wm_softc *sc = txq->txq_sc;
   10605 	u_int txlimit = sc->sc_tx_process_limit;
   10606 	u_int rxlimit = sc->sc_rx_process_limit;
   10607 	bool txmore;
   10608 	bool rxmore;
   10609 
   10610 	mutex_enter(txq->txq_lock);
   10611 	if (txq->txq_stopping) {
   10612 		mutex_exit(txq->txq_lock);
   10613 		return;
   10614 	}
   10615 	txmore = wm_txeof(txq, txlimit);
   10616 	wm_deferred_start_locked(txq);
   10617 	mutex_exit(txq->txq_lock);
   10618 
   10619 	mutex_enter(rxq->rxq_lock);
   10620 	if (rxq->rxq_stopping) {
   10621 		mutex_exit(rxq->rxq_lock);
   10622 		return;
   10623 	}
   10624 	WM_Q_EVCNT_INCR(rxq, defer);
   10625 	rxmore = wm_rxeof(rxq, rxlimit);
   10626 	mutex_exit(rxq->rxq_lock);
   10627 
   10628 	if (txmore || rxmore) {
   10629 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10630 		wm_sched_handle_queue(sc, wmq);
   10631 	} else
   10632 		wm_txrxintr_enable(wmq);
   10633 }
   10634 
   10635 static void
   10636 wm_handle_queue_work(struct work *wk, void *context)
   10637 {
   10638 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10639 
   10640 	/*
    10641 	 * An "enqueued" flag is not required here (the intr is masked).
   10642 	 */
   10643 	wm_handle_queue(wmq);
   10644 }
   10645 
   10646 /*
   10647  * wm_linkintr_msix:
   10648  *
   10649  *	Interrupt service routine for link status change for MSI-X.
   10650  */
   10651 static int
   10652 wm_linkintr_msix(void *arg)
   10653 {
   10654 	struct wm_softc *sc = arg;
   10655 	uint32_t reg;
   10656 	bool has_rxo;
   10657 
   10658 	reg = CSR_READ(sc, WMREG_ICR);
   10659 	WM_CORE_LOCK(sc);
   10660 	DPRINTF(sc, WM_DEBUG_LINK,
   10661 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10662 		device_xname(sc->sc_dev), reg));
   10663 
   10664 	if (sc->sc_core_stopping)
   10665 		goto out;
   10666 
   10667 	if ((reg & ICR_LSC) != 0) {
   10668 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10669 		wm_linkintr(sc, ICR_LSC);
   10670 	}
   10671 	if ((reg & ICR_GPI(0)) != 0)
   10672 		device_printf(sc->sc_dev, "got module interrupt\n");
   10673 
   10674 	/*
   10675 	 * XXX 82574 MSI-X mode workaround
   10676 	 *
    10677 	 * In MSI-X mode, the 82574 signals a receive overrun (RXO) on the
    10678 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
    10679 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    10680 	 * interrupts by writing WMREG_ICS, to process the received packets.
   10681 	 */
   10682 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10683 #if defined(WM_DEBUG)
   10684 		log(LOG_WARNING, "%s: Receive overrun\n",
   10685 		    device_xname(sc->sc_dev));
   10686 #endif /* defined(WM_DEBUG) */
   10687 
   10688 		has_rxo = true;
   10689 		/*
    10690 		 * The RXO interrupt fires at a very high rate when receive
    10691 		 * traffic is heavy, so we use polling mode for ICR_OTHER just
    10692 		 * as for the Tx/Rx interrupts. ICR_OTHER is re-enabled at the
    10693 		 * end of wm_txrxintr_msix(), which is kicked by both the
    10694 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   10695 		 */
   10696 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10697 
   10698 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10699 	}
   10700 
   10703 out:
   10704 	WM_CORE_UNLOCK(sc);
   10705 
   10706 	if (sc->sc_type == WM_T_82574) {
   10707 		if (!has_rxo)
   10708 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10709 		else
   10710 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10711 	} else if (sc->sc_type == WM_T_82575)
   10712 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10713 	else
   10714 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10715 
   10716 	return 1;
   10717 }
   10718 
   10719 /*
   10720  * Media related.
   10721  * GMII, SGMII, TBI (and SERDES)
   10722  */
   10723 
   10724 /* Common */
   10725 
   10726 /*
   10727  * wm_tbi_serdes_set_linkled:
   10728  *
   10729  *	Update the link LED on TBI and SERDES devices.
   10730  */
   10731 static void
   10732 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10733 {
   10734 
   10735 	if (sc->sc_tbi_linkup)
   10736 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10737 	else
   10738 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10739 
   10740 	/* 82540 or newer devices are active low */
   10741 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10742 
   10743 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10744 }
   10745 
   10746 /* GMII related */
   10747 
   10748 /*
   10749  * wm_gmii_reset:
   10750  *
   10751  *	Reset the PHY.
   10752  */
   10753 static void
   10754 wm_gmii_reset(struct wm_softc *sc)
   10755 {
   10756 	uint32_t reg;
   10757 	int rv;
   10758 
   10759 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10760 		device_xname(sc->sc_dev), __func__));
   10761 
   10762 	rv = sc->phy.acquire(sc);
   10763 	if (rv != 0) {
   10764 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10765 		    __func__);
   10766 		return;
   10767 	}
   10768 
   10769 	switch (sc->sc_type) {
   10770 	case WM_T_82542_2_0:
   10771 	case WM_T_82542_2_1:
   10772 		/* null */
   10773 		break;
   10774 	case WM_T_82543:
   10775 		/*
   10776 		 * With 82543, we need to force speed and duplex on the MAC
   10777 		 * equal to what the PHY speed and duplex configuration is.
   10778 		 * In addition, we need to perform a hardware reset on the PHY
   10779 		 * to take it out of reset.
   10780 		 */
   10781 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10782 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10783 
   10784 		/* The PHY reset pin is active-low. */
   10785 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10786 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10787 		    CTRL_EXT_SWDPIN(4));
   10788 		reg |= CTRL_EXT_SWDPIO(4);
   10789 
   10790 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10791 		CSR_WRITE_FLUSH(sc);
   10792 		delay(10*1000);
   10793 
   10794 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10795 		CSR_WRITE_FLUSH(sc);
   10796 		delay(150);
   10797 #if 0
   10798 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10799 #endif
   10800 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10801 		break;
   10802 	case WM_T_82544:	/* Reset 10000us */
   10803 	case WM_T_82540:
   10804 	case WM_T_82545:
   10805 	case WM_T_82545_3:
   10806 	case WM_T_82546:
   10807 	case WM_T_82546_3:
   10808 	case WM_T_82541:
   10809 	case WM_T_82541_2:
   10810 	case WM_T_82547:
   10811 	case WM_T_82547_2:
   10812 	case WM_T_82571:	/* Reset 100us */
   10813 	case WM_T_82572:
   10814 	case WM_T_82573:
   10815 	case WM_T_82574:
   10816 	case WM_T_82575:
   10817 	case WM_T_82576:
   10818 	case WM_T_82580:
   10819 	case WM_T_I350:
   10820 	case WM_T_I354:
   10821 	case WM_T_I210:
   10822 	case WM_T_I211:
   10823 	case WM_T_82583:
   10824 	case WM_T_80003:
   10825 		/* Generic reset */
   10826 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10827 		CSR_WRITE_FLUSH(sc);
   10828 		delay(20000);
   10829 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10830 		CSR_WRITE_FLUSH(sc);
   10831 		delay(20000);
   10832 
   10833 		if ((sc->sc_type == WM_T_82541)
   10834 		    || (sc->sc_type == WM_T_82541_2)
   10835 		    || (sc->sc_type == WM_T_82547)
   10836 		    || (sc->sc_type == WM_T_82547_2)) {
   10837 			/* Workaround for igp are done in igp_reset() */
   10838 			/* XXX add code to set LED after phy reset */
   10839 		}
   10840 		break;
   10841 	case WM_T_ICH8:
   10842 	case WM_T_ICH9:
   10843 	case WM_T_ICH10:
   10844 	case WM_T_PCH:
   10845 	case WM_T_PCH2:
   10846 	case WM_T_PCH_LPT:
   10847 	case WM_T_PCH_SPT:
   10848 	case WM_T_PCH_CNP:
   10849 		/* Generic reset */
   10850 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10851 		CSR_WRITE_FLUSH(sc);
   10852 		delay(100);
   10853 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10854 		CSR_WRITE_FLUSH(sc);
   10855 		delay(150);
   10856 		break;
   10857 	default:
   10858 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10859 		    __func__);
   10860 		break;
   10861 	}
   10862 
   10863 	sc->phy.release(sc);
   10864 
   10865 	/* get_cfg_done */
   10866 	wm_get_cfg_done(sc);
   10867 
   10868 	/* Extra setup */
   10869 	switch (sc->sc_type) {
   10870 	case WM_T_82542_2_0:
   10871 	case WM_T_82542_2_1:
   10872 	case WM_T_82543:
   10873 	case WM_T_82544:
   10874 	case WM_T_82540:
   10875 	case WM_T_82545:
   10876 	case WM_T_82545_3:
   10877 	case WM_T_82546:
   10878 	case WM_T_82546_3:
   10879 	case WM_T_82541_2:
   10880 	case WM_T_82547_2:
   10881 	case WM_T_82571:
   10882 	case WM_T_82572:
   10883 	case WM_T_82573:
   10884 	case WM_T_82574:
   10885 	case WM_T_82583:
   10886 	case WM_T_82575:
   10887 	case WM_T_82576:
   10888 	case WM_T_82580:
   10889 	case WM_T_I350:
   10890 	case WM_T_I354:
   10891 	case WM_T_I210:
   10892 	case WM_T_I211:
   10893 	case WM_T_80003:
   10894 		/* Null */
   10895 		break;
   10896 	case WM_T_82541:
   10897 	case WM_T_82547:
   10898 		/* XXX Configure actively LED after PHY reset */
   10899 		break;
   10900 	case WM_T_ICH8:
   10901 	case WM_T_ICH9:
   10902 	case WM_T_ICH10:
   10903 	case WM_T_PCH:
   10904 	case WM_T_PCH2:
   10905 	case WM_T_PCH_LPT:
   10906 	case WM_T_PCH_SPT:
   10907 	case WM_T_PCH_CNP:
   10908 		wm_phy_post_reset(sc);
   10909 		break;
   10910 	default:
   10911 		panic("%s: unknown type\n", __func__);
   10912 		break;
   10913 	}
   10914 }
   10915 
   10916 /*
   10917  * Set up sc_phytype and mii_{read|write}reg.
   10918  *
    10919  *  To identify the PHY type, the correct read/write functions must be
    10920  * selected, and to select them the PCI ID or the MAC type is used,
    10921  * because the PHY registers cannot be accessed yet.
    10922  *
    10923  *  On the first call of this function, the PHY ID is not known yet, so
    10924  * check the PCI ID or the MAC type. The list of PCI IDs may not be
    10925  * complete, so the result might be incorrect.
    10926  *
    10927  *  On the second call, the PHY OUI and model are used to identify the
    10928  * PHY type. This may still be imperfect because some entries may be
    10929  * missing from the comparison tables, but it is better than the first call.
    10930  *
    10931  *  If the newly detected result differs from the previous assumption,
    10932  * a diagnostic message is printed.
   10933  */
   10934 static void
   10935 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10936     uint16_t phy_model)
   10937 {
   10938 	device_t dev = sc->sc_dev;
   10939 	struct mii_data *mii = &sc->sc_mii;
   10940 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10941 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10942 	mii_readreg_t new_readreg;
   10943 	mii_writereg_t new_writereg;
   10944 	bool dodiag = true;
   10945 
   10946 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10947 		device_xname(sc->sc_dev), __func__));
   10948 
   10949 	/*
    10950 	 * A 1000BASE-T SFP uses SGMII, so the first assumed PHY type is
    10951 	 * always incorrect. Don't print diagnostic output on the second call.
   10952 	 */
   10953 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10954 		dodiag = false;
   10955 
   10956 	if (mii->mii_readreg == NULL) {
   10957 		/*
   10958 		 *  This is the first call of this function. For ICH and PCH
   10959 		 * variants, it's difficult to determine the PHY access method
   10960 		 * by sc_type, so use the PCI product ID for some devices.
   10961 		 */
   10962 
   10963 		switch (sc->sc_pcidevid) {
   10964 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10965 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10966 			/* 82577 */
   10967 			new_phytype = WMPHY_82577;
   10968 			break;
   10969 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10970 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10971 			/* 82578 */
   10972 			new_phytype = WMPHY_82578;
   10973 			break;
   10974 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10975 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10976 			/* 82579 */
   10977 			new_phytype = WMPHY_82579;
   10978 			break;
   10979 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10980 		case PCI_PRODUCT_INTEL_82801I_BM:
   10981 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10982 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10983 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10984 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10985 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10986 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10987 			/* ICH8, 9, 10 with 82567 */
   10988 			new_phytype = WMPHY_BM;
   10989 			break;
   10990 		default:
   10991 			break;
   10992 		}
   10993 	} else {
   10994 		/* It's not the first call. Use PHY OUI and model */
   10995 		switch (phy_oui) {
   10996 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10997 			switch (phy_model) {
   10998 			case MII_MODEL_ATTANSIC_AR8021:
   10999 				new_phytype = WMPHY_82578;
   11000 				break;
   11001 			default:
   11002 				break;
   11003 			}
   11004 			break;
   11005 		case MII_OUI_xxMARVELL:
   11006 			switch (phy_model) {
   11007 			case MII_MODEL_xxMARVELL_I210:
   11008 				new_phytype = WMPHY_I210;
   11009 				break;
   11010 			case MII_MODEL_xxMARVELL_E1011:
   11011 			case MII_MODEL_xxMARVELL_E1000_3:
   11012 			case MII_MODEL_xxMARVELL_E1000_5:
   11013 			case MII_MODEL_xxMARVELL_E1112:
   11014 				new_phytype = WMPHY_M88;
   11015 				break;
   11016 			case MII_MODEL_xxMARVELL_E1149:
   11017 				new_phytype = WMPHY_BM;
   11018 				break;
   11019 			case MII_MODEL_xxMARVELL_E1111:
   11020 			case MII_MODEL_xxMARVELL_I347:
   11021 			case MII_MODEL_xxMARVELL_E1512:
   11022 			case MII_MODEL_xxMARVELL_E1340M:
   11023 			case MII_MODEL_xxMARVELL_E1543:
   11024 				new_phytype = WMPHY_M88;
   11025 				break;
   11026 			case MII_MODEL_xxMARVELL_I82563:
   11027 				new_phytype = WMPHY_GG82563;
   11028 				break;
   11029 			default:
   11030 				break;
   11031 			}
   11032 			break;
   11033 		case MII_OUI_INTEL:
   11034 			switch (phy_model) {
   11035 			case MII_MODEL_INTEL_I82577:
   11036 				new_phytype = WMPHY_82577;
   11037 				break;
   11038 			case MII_MODEL_INTEL_I82579:
   11039 				new_phytype = WMPHY_82579;
   11040 				break;
   11041 			case MII_MODEL_INTEL_I217:
   11042 				new_phytype = WMPHY_I217;
   11043 				break;
   11044 			case MII_MODEL_INTEL_I82580:
   11045 				new_phytype = WMPHY_82580;
   11046 				break;
   11047 			case MII_MODEL_INTEL_I350:
   11048 				new_phytype = WMPHY_I350;
   11049 				break;
   11050 			default:
   11051 				break;
   11052 			}
   11053 			break;
   11054 		case MII_OUI_yyINTEL:
   11055 			switch (phy_model) {
   11056 			case MII_MODEL_yyINTEL_I82562G:
   11057 			case MII_MODEL_yyINTEL_I82562EM:
   11058 			case MII_MODEL_yyINTEL_I82562ET:
   11059 				new_phytype = WMPHY_IFE;
   11060 				break;
   11061 			case MII_MODEL_yyINTEL_IGP01E1000:
   11062 				new_phytype = WMPHY_IGP;
   11063 				break;
   11064 			case MII_MODEL_yyINTEL_I82566:
   11065 				new_phytype = WMPHY_IGP_3;
   11066 				break;
   11067 			default:
   11068 				break;
   11069 			}
   11070 			break;
   11071 		default:
   11072 			break;
   11073 		}
   11074 
   11075 		if (dodiag) {
   11076 			if (new_phytype == WMPHY_UNKNOWN)
   11077 				aprint_verbose_dev(dev,
   11078 				    "%s: Unknown PHY model. OUI=%06x, "
   11079 				    "model=%04x\n", __func__, phy_oui,
   11080 				    phy_model);
   11081 
   11082 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11083 			    && (sc->sc_phytype != new_phytype)) {
   11084 				aprint_error_dev(dev, "Previously assumed PHY "
    11085 				    "type (%u) was incorrect. PHY type from "
    11086 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   11087 			}
   11088 		}
   11089 	}
   11090 
   11091 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11092 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11093 		/* SGMII */
   11094 		new_readreg = wm_sgmii_readreg;
   11095 		new_writereg = wm_sgmii_writereg;
    11096 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11097 		/* BM2 (phyaddr == 1) */
   11098 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11099 		    && (new_phytype != WMPHY_BM)
   11100 		    && (new_phytype != WMPHY_UNKNOWN))
   11101 			doubt_phytype = new_phytype;
   11102 		new_phytype = WMPHY_BM;
   11103 		new_readreg = wm_gmii_bm_readreg;
   11104 		new_writereg = wm_gmii_bm_writereg;
   11105 	} else if (sc->sc_type >= WM_T_PCH) {
   11106 		/* All PCH* use _hv_ */
   11107 		new_readreg = wm_gmii_hv_readreg;
   11108 		new_writereg = wm_gmii_hv_writereg;
   11109 	} else if (sc->sc_type >= WM_T_ICH8) {
   11110 		/* non-82567 ICH8, 9 and 10 */
   11111 		new_readreg = wm_gmii_i82544_readreg;
   11112 		new_writereg = wm_gmii_i82544_writereg;
   11113 	} else if (sc->sc_type >= WM_T_80003) {
   11114 		/* 80003 */
   11115 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11116 		    && (new_phytype != WMPHY_GG82563)
   11117 		    && (new_phytype != WMPHY_UNKNOWN))
   11118 			doubt_phytype = new_phytype;
   11119 		new_phytype = WMPHY_GG82563;
   11120 		new_readreg = wm_gmii_i80003_readreg;
   11121 		new_writereg = wm_gmii_i80003_writereg;
   11122 	} else if (sc->sc_type >= WM_T_I210) {
   11123 		/* I210 and I211 */
   11124 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11125 		    && (new_phytype != WMPHY_I210)
   11126 		    && (new_phytype != WMPHY_UNKNOWN))
   11127 			doubt_phytype = new_phytype;
   11128 		new_phytype = WMPHY_I210;
   11129 		new_readreg = wm_gmii_gs40g_readreg;
   11130 		new_writereg = wm_gmii_gs40g_writereg;
   11131 	} else if (sc->sc_type >= WM_T_82580) {
   11132 		/* 82580, I350 and I354 */
   11133 		new_readreg = wm_gmii_82580_readreg;
   11134 		new_writereg = wm_gmii_82580_writereg;
   11135 	} else if (sc->sc_type >= WM_T_82544) {
    11136 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11137 		new_readreg = wm_gmii_i82544_readreg;
   11138 		new_writereg = wm_gmii_i82544_writereg;
   11139 	} else {
   11140 		new_readreg = wm_gmii_i82543_readreg;
   11141 		new_writereg = wm_gmii_i82543_writereg;
   11142 	}
   11143 
   11144 	if (new_phytype == WMPHY_BM) {
   11145 		/* All BM use _bm_ */
   11146 		new_readreg = wm_gmii_bm_readreg;
   11147 		new_writereg = wm_gmii_bm_writereg;
   11148 	}
   11149 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11150 		/* All PCH* use _hv_ */
   11151 		new_readreg = wm_gmii_hv_readreg;
   11152 		new_writereg = wm_gmii_hv_writereg;
   11153 	}
   11154 
   11155 	/* Diag output */
   11156 	if (dodiag) {
   11157 		if (doubt_phytype != WMPHY_UNKNOWN)
   11158 			aprint_error_dev(dev, "Assumed new PHY type was "
   11159 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11160 			    new_phytype);
   11161 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11162 		    && (sc->sc_phytype != new_phytype))
    11163 			aprint_error_dev(dev, "Previously assumed PHY type "
    11164 			    "(%u) was incorrect. New PHY type = %u\n",
   11165 			    sc->sc_phytype, new_phytype);
   11166 
   11167 		if ((mii->mii_readreg != NULL) &&
   11168 		    (new_phytype == WMPHY_UNKNOWN))
   11169 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11170 
   11171 		if ((mii->mii_readreg != NULL) &&
   11172 		    (mii->mii_readreg != new_readreg))
   11173 			aprint_error_dev(dev, "Previously assumed PHY "
   11174 			    "read/write function was incorrect.\n");
   11175 	}
   11176 
   11177 	/* Update now */
   11178 	sc->sc_phytype = new_phytype;
   11179 	mii->mii_readreg = new_readreg;
   11180 	mii->mii_writereg = new_writereg;
   11181 	if (new_readreg == wm_gmii_hv_readreg) {
   11182 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11183 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11184 	} else if (new_readreg == wm_sgmii_readreg) {
   11185 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11186 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11187 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11188 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11189 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11190 	}
   11191 }
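
/*
 * A minimal sketch (kept under #if 0, following the file's convention for
 * non-compiled fragments) of the two-call pattern described in the comment
 * above: the first call is made before the PHY is probed, with zero
 * OUI/model, and the second call, from wm_gmii_mediainit() below, passes
 * the OUI and model of the attached child so the first guess can be
 * corrected. The exact call sites elsewhere in the driver may differ.
 */
#if 0
	struct mii_softc *child;

	/* First call: PHY not probed yet; guess from the PCI ID/MAC type. */
	wm_gmii_setup_phytype(sc, 0, 0);

	/* ... mii_attach() probes the PHY ... */

	/* Second call: refine the guess with the real PHY OUI and model. */
	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child != NULL)
		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
		    child->mii_mpd_model);
#endif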
   11192 
   11193 /*
   11194  * wm_get_phy_id_82575:
   11195  *
   11196  * Return PHY ID. Return -1 if it failed.
   11197  */
   11198 static int
   11199 wm_get_phy_id_82575(struct wm_softc *sc)
   11200 {
   11201 	uint32_t reg;
   11202 	int phyid = -1;
   11203 
   11204 	/* XXX */
   11205 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11206 		return -1;
   11207 
   11208 	if (wm_sgmii_uses_mdio(sc)) {
   11209 		switch (sc->sc_type) {
   11210 		case WM_T_82575:
   11211 		case WM_T_82576:
   11212 			reg = CSR_READ(sc, WMREG_MDIC);
   11213 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11214 			break;
   11215 		case WM_T_82580:
   11216 		case WM_T_I350:
   11217 		case WM_T_I354:
   11218 		case WM_T_I210:
   11219 		case WM_T_I211:
   11220 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11221 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11222 			break;
   11223 		default:
   11224 			return -1;
   11225 		}
   11226 	}
   11227 
   11228 	return phyid;
   11229 }
   11230 
   11231 /*
   11232  * wm_gmii_mediainit:
   11233  *
   11234  *	Initialize media for use on 1000BASE-T devices.
   11235  */
   11236 static void
   11237 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11238 {
   11239 	device_t dev = sc->sc_dev;
   11240 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11241 	struct mii_data *mii = &sc->sc_mii;
   11242 
   11243 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11244 		device_xname(sc->sc_dev), __func__));
   11245 
   11246 	/* We have GMII. */
   11247 	sc->sc_flags |= WM_F_HAS_MII;
   11248 
   11249 	if (sc->sc_type == WM_T_80003)
   11250 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11251 	else
   11252 		sc->sc_tipg = TIPG_1000T_DFLT;
   11253 
   11254 	/*
   11255 	 * Let the chip set speed/duplex on its own based on
   11256 	 * signals from the PHY.
   11257 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11258 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11259 	 */
   11260 	sc->sc_ctrl |= CTRL_SLU;
   11261 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11262 
   11263 	/* Initialize our media structures and probe the GMII. */
   11264 	mii->mii_ifp = ifp;
   11265 
   11266 	mii->mii_statchg = wm_gmii_statchg;
   11267 
	/* Get PHY control from SMBus to PCIe */
   11269 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11270 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11271 	    || (sc->sc_type == WM_T_PCH_CNP))
   11272 		wm_init_phy_workarounds_pchlan(sc);
   11273 
   11274 	wm_gmii_reset(sc);
   11275 
   11276 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11277 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11278 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11279 
   11280 	/* Setup internal SGMII PHY for SFP */
   11281 	wm_sgmii_sfp_preconfig(sc);
   11282 
   11283 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11284 	    || (sc->sc_type == WM_T_82580)
   11285 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11286 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11287 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11288 			/* Attach only one port */
   11289 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11290 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11291 		} else {
   11292 			int i, id;
   11293 			uint32_t ctrl_ext;
   11294 
   11295 			id = wm_get_phy_id_82575(sc);
   11296 			if (id != -1) {
   11297 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11298 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11299 			}
   11300 			if ((id == -1)
   11301 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    11302 				/* Power on the SGMII PHY if it is disabled */
   11303 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11304 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11305 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11306 				CSR_WRITE_FLUSH(sc);
   11307 				delay(300*1000); /* XXX too long */
   11308 
    11309 				/*
    11310 				 * Scan PHY addresses 1 through 7.
    11311 				 *
    11312 				 * I2C access fails with the I2C register's
    11313 				 * ERROR bit set, so suppress error messages
    11314 				 * while scanning.
    11315 				 */
   11316 				sc->phy.no_errprint = true;
   11317 				for (i = 1; i < 8; i++)
   11318 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11319 					    0xffffffff, i, MII_OFFSET_ANY,
   11320 					    MIIF_DOPAUSE);
   11321 				sc->phy.no_errprint = false;
   11322 
   11323 				/* Restore previous sfp cage power state */
   11324 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11325 			}
   11326 		}
   11327 	} else
   11328 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11329 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11330 
   11331 	/*
   11332 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   11333 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   11334 	 */
   11335 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11336 		|| (sc->sc_type == WM_T_PCH_SPT)
   11337 		|| (sc->sc_type == WM_T_PCH_CNP))
   11338 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11339 		wm_set_mdio_slow_mode_hv(sc);
   11340 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11341 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11342 	}
   11343 
   11344 	/*
   11345 	 * (For ICH8 variants)
   11346 	 * If PHY detection failed, use BM's r/w function and retry.
   11347 	 */
   11348 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11349 		/* if failed, retry with *_bm_* */
   11350 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11351 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11352 		    sc->sc_phytype);
   11353 		sc->sc_phytype = WMPHY_BM;
   11354 		mii->mii_readreg = wm_gmii_bm_readreg;
   11355 		mii->mii_writereg = wm_gmii_bm_writereg;
   11356 
   11357 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11358 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11359 	}
   11360 
   11361 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    11362 		/* No PHY was found */
   11363 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11364 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11365 		sc->sc_phytype = WMPHY_NONE;
   11366 	} else {
   11367 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11368 
   11369 		/*
    11370 		 * PHY found! Check the PHY type again with the second call
    11371 		 * of wm_gmii_setup_phytype().
   11372 		 */
   11373 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11374 		    child->mii_mpd_model);
   11375 
   11376 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11377 	}
   11378 }
   11379 
   11380 /*
   11381  * wm_gmii_mediachange:	[ifmedia interface function]
   11382  *
   11383  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11384  */
   11385 static int
   11386 wm_gmii_mediachange(struct ifnet *ifp)
   11387 {
   11388 	struct wm_softc *sc = ifp->if_softc;
   11389 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11390 	uint32_t reg;
   11391 	int rc;
   11392 
   11393 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11394 		device_xname(sc->sc_dev), __func__));
   11395 
   11396 	KASSERT(WM_CORE_LOCKED(sc));
   11397 
   11398 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11399 		return 0;
   11400 
   11401 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11402 	if ((sc->sc_type == WM_T_82580)
   11403 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11404 	    || (sc->sc_type == WM_T_I211)) {
   11405 		reg = CSR_READ(sc, WMREG_PHPM);
   11406 		reg &= ~PHPM_GO_LINK_D;
   11407 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11408 	}
   11409 
   11410 	/* Disable D0 LPLU. */
   11411 	wm_lplu_d0_disable(sc);
   11412 
   11413 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11414 	sc->sc_ctrl |= CTRL_SLU;
   11415 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11416 	    || (sc->sc_type > WM_T_82543)) {
   11417 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11418 	} else {
   11419 		sc->sc_ctrl &= ~CTRL_ASDE;
   11420 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11421 		if (ife->ifm_media & IFM_FDX)
   11422 			sc->sc_ctrl |= CTRL_FD;
   11423 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11424 		case IFM_10_T:
   11425 			sc->sc_ctrl |= CTRL_SPEED_10;
   11426 			break;
   11427 		case IFM_100_TX:
   11428 			sc->sc_ctrl |= CTRL_SPEED_100;
   11429 			break;
   11430 		case IFM_1000_T:
   11431 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11432 			break;
   11433 		case IFM_NONE:
   11434 			/* There is no specific setting for IFM_NONE */
   11435 			break;
   11436 		default:
   11437 			panic("wm_gmii_mediachange: bad media 0x%x",
   11438 			    ife->ifm_media);
   11439 		}
   11440 	}
   11441 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11442 	CSR_WRITE_FLUSH(sc);
   11443 
   11444 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11445 		wm_serdes_mediachange(ifp);
   11446 
   11447 	if (sc->sc_type <= WM_T_82543)
   11448 		wm_gmii_reset(sc);
   11449 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11450 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    11451 		/* Allow time for the SFP cage to power up the PHY */
   11452 		delay(300 * 1000);
   11453 		wm_gmii_reset(sc);
   11454 	}
   11455 
   11456 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11457 		return 0;
   11458 	return rc;
   11459 }
   11460 
   11461 /*
   11462  * wm_gmii_mediastatus:	[ifmedia interface function]
   11463  *
   11464  *	Get the current interface media status on a 1000BASE-T device.
   11465  */
   11466 static void
   11467 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11468 {
   11469 	struct wm_softc *sc = ifp->if_softc;
   11470 
   11471 	KASSERT(WM_CORE_LOCKED(sc));
   11472 
   11473 	ether_mediastatus(ifp, ifmr);
   11474 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11475 	    | sc->sc_flowflags;
   11476 }
   11477 
   11478 #define	MDI_IO		CTRL_SWDPIN(2)
   11479 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11480 #define	MDI_CLK		CTRL_SWDPIN(3)
   11481 
   11482 static void
   11483 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11484 {
   11485 	uint32_t i, v;
   11486 
   11487 	v = CSR_READ(sc, WMREG_CTRL);
   11488 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11489 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11490 
   11491 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11492 		if (data & i)
   11493 			v |= MDI_IO;
   11494 		else
   11495 			v &= ~MDI_IO;
   11496 		CSR_WRITE(sc, WMREG_CTRL, v);
   11497 		CSR_WRITE_FLUSH(sc);
   11498 		delay(10);
   11499 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11500 		CSR_WRITE_FLUSH(sc);
   11501 		delay(10);
   11502 		CSR_WRITE(sc, WMREG_CTRL, v);
   11503 		CSR_WRITE_FLUSH(sc);
   11504 		delay(10);
   11505 	}
   11506 }
   11507 
   11508 static uint16_t
   11509 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11510 {
   11511 	uint32_t v, i;
   11512 	uint16_t data = 0;
   11513 
   11514 	v = CSR_READ(sc, WMREG_CTRL);
   11515 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11516 	v |= CTRL_SWDPIO(3);
   11517 
   11518 	CSR_WRITE(sc, WMREG_CTRL, v);
   11519 	CSR_WRITE_FLUSH(sc);
   11520 	delay(10);
   11521 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11522 	CSR_WRITE_FLUSH(sc);
   11523 	delay(10);
   11524 	CSR_WRITE(sc, WMREG_CTRL, v);
   11525 	CSR_WRITE_FLUSH(sc);
   11526 	delay(10);
   11527 
   11528 	for (i = 0; i < 16; i++) {
   11529 		data <<= 1;
   11530 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11531 		CSR_WRITE_FLUSH(sc);
   11532 		delay(10);
   11533 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11534 			data |= 1;
   11535 		CSR_WRITE(sc, WMREG_CTRL, v);
   11536 		CSR_WRITE_FLUSH(sc);
   11537 		delay(10);
   11538 	}
   11539 
   11540 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11541 	CSR_WRITE_FLUSH(sc);
   11542 	delay(10);
   11543 	CSR_WRITE(sc, WMREG_CTRL, v);
   11544 	CSR_WRITE_FLUSH(sc);
   11545 	delay(10);
   11546 
   11547 	return data;
   11548 }
   11549 
   11550 #undef MDI_IO
   11551 #undef MDI_DIR
   11552 #undef MDI_CLK
   11553 
   11554 /*
   11555  * wm_gmii_i82543_readreg:	[mii interface function]
   11556  *
   11557  *	Read a PHY register on the GMII (i82543 version).
   11558  */
   11559 static int
   11560 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11561 {
   11562 	struct wm_softc *sc = device_private(dev);
   11563 
   11564 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11565 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11566 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11567 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11568 
   11569 	DPRINTF(sc, WM_DEBUG_GMII,
   11570 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11571 		device_xname(dev), phy, reg, *val));
   11572 
   11573 	return 0;
   11574 }
   11575 
   11576 /*
   11577  * wm_gmii_i82543_writereg:	[mii interface function]
   11578  *
   11579  *	Write a PHY register on the GMII (i82543 version).
   11580  */
   11581 static int
   11582 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11583 {
   11584 	struct wm_softc *sc = device_private(dev);
   11585 
   11586 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11587 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11588 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11589 	    (MII_COMMAND_START << 30), 32);
   11590 
   11591 	return 0;
   11592 }
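
/*
 * Added note: the two helpers above bit-bang standard IEEE 802.3 clause 22
 * MDIO frames on the software-definable pins. After the 32-bit preamble of
 * ones, the frames, sent MSB first, are laid out as follows (field widths
 * and values follow directly from the shifts in the code above):
 *
 *	read:	<ST:01><OP:10><PHY:5><REG:5>		(14 bits sent; the
 *		PHY then drives the turnaround and 16 data bits, sampled
 *		by wm_i82543_mii_recvbits())
 *	write:	<ST:01><OP:01><PHY:5><REG:5><TA:10><DATA:16>  (32 bits sent)
 */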
   11593 
   11594 /*
   11595  * wm_gmii_mdic_readreg:	[mii interface function]
   11596  *
   11597  *	Read a PHY register on the GMII.
   11598  */
   11599 static int
   11600 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11601 {
   11602 	struct wm_softc *sc = device_private(dev);
   11603 	uint32_t mdic = 0;
   11604 	int i;
   11605 
   11606 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11607 	    && (reg > MII_ADDRMASK)) {
   11608 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11609 		    __func__, sc->sc_phytype, reg);
   11610 		reg &= MII_ADDRMASK;
   11611 	}
   11612 
   11613 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11614 	    MDIC_REGADD(reg));
   11615 
   11616 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11617 		delay(50);
   11618 		mdic = CSR_READ(sc, WMREG_MDIC);
   11619 		if (mdic & MDIC_READY)
   11620 			break;
   11621 	}
   11622 
   11623 	if ((mdic & MDIC_READY) == 0) {
   11624 		DPRINTF(sc, WM_DEBUG_GMII,
   11625 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11626 			device_xname(dev), phy, reg));
   11627 		return ETIMEDOUT;
   11628 	} else if (mdic & MDIC_E) {
   11629 		/* This is normal if no PHY is present. */
   11630 		DPRINTF(sc, WM_DEBUG_GMII,
   11631 		    ("%s: MDIC read error: phy %d reg %d\n",
   11632 			device_xname(sc->sc_dev), phy, reg));
   11633 		return -1;
   11634 	} else
   11635 		*val = MDIC_DATA(mdic);
   11636 
   11637 	/*
   11638 	 * Allow some time after each MDIC transaction to avoid
   11639 	 * reading duplicate data in the next MDIC transaction.
   11640 	 */
   11641 	if (sc->sc_type == WM_T_PCH2)
   11642 		delay(100);
   11643 
   11644 	return 0;
   11645 }
   11646 
   11647 /*
   11648  * wm_gmii_mdic_writereg:	[mii interface function]
   11649  *
   11650  *	Write a PHY register on the GMII.
   11651  */
   11652 static int
   11653 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11654 {
   11655 	struct wm_softc *sc = device_private(dev);
   11656 	uint32_t mdic = 0;
   11657 	int i;
   11658 
   11659 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11660 	    && (reg > MII_ADDRMASK)) {
   11661 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11662 		    __func__, sc->sc_phytype, reg);
   11663 		reg &= MII_ADDRMASK;
   11664 	}
   11665 
   11666 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11667 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11668 
   11669 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11670 		delay(50);
   11671 		mdic = CSR_READ(sc, WMREG_MDIC);
   11672 		if (mdic & MDIC_READY)
   11673 			break;
   11674 	}
   11675 
   11676 	if ((mdic & MDIC_READY) == 0) {
   11677 		DPRINTF(sc, WM_DEBUG_GMII,
   11678 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11679 			device_xname(dev), phy, reg));
   11680 		return ETIMEDOUT;
   11681 	} else if (mdic & MDIC_E) {
   11682 		DPRINTF(sc, WM_DEBUG_GMII,
   11683 		    ("%s: MDIC write error: phy %d reg %d\n",
   11684 			device_xname(dev), phy, reg));
   11685 		return -1;
   11686 	}
   11687 
   11688 	/*
   11689 	 * Allow some time after each MDIC transaction to avoid
   11690 	 * reading duplicate data in the next MDIC transaction.
   11691 	 */
   11692 	if (sc->sc_type == WM_T_PCH2)
   11693 		delay(100);
   11694 
   11695 	return 0;
   11696 }
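
/*
 * A minimal sketch (not compiled) of a raw MDIC transaction pair using the
 * two helpers above, assuming the usual <dev/mii/mii.h> definitions of
 * MII_BMCR and BMCR_RESET; the PHY address and the choice of BMCR here are
 * examples only, and dev is the device handle as in the helpers above.
 */
#if 0
	uint16_t bmcr;

	if (wm_gmii_mdic_readreg(dev, 2, MII_BMCR, &bmcr) == 0)
		wm_gmii_mdic_writereg(dev, 2, MII_BMCR, bmcr | BMCR_RESET);
#endif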
   11697 
   11698 /*
   11699  * wm_gmii_i82544_readreg:	[mii interface function]
   11700  *
   11701  *	Read a PHY register on the GMII.
   11702  */
   11703 static int
   11704 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11705 {
   11706 	struct wm_softc *sc = device_private(dev);
   11707 	int rv;
   11708 
   11709 	rv = sc->phy.acquire(sc);
   11710 	if (rv != 0) {
   11711 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11712 		return rv;
   11713 	}
   11714 
   11715 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11716 
   11717 	sc->phy.release(sc);
   11718 
   11719 	return rv;
   11720 }
   11721 
   11722 static int
   11723 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11724 {
   11725 	struct wm_softc *sc = device_private(dev);
   11726 	int rv;
   11727 
   11728 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11729 		switch (sc->sc_phytype) {
   11730 		case WMPHY_IGP:
   11731 		case WMPHY_IGP_2:
   11732 		case WMPHY_IGP_3:
   11733 			rv = wm_gmii_mdic_writereg(dev, phy,
   11734 			    IGPHY_PAGE_SELECT, reg);
   11735 			if (rv != 0)
   11736 				return rv;
   11737 			break;
   11738 		default:
   11739 #ifdef WM_DEBUG
   11740 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11741 			    __func__, sc->sc_phytype, reg);
   11742 #endif
   11743 			break;
   11744 		}
   11745 	}
   11746 
   11747 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11748 }
   11749 
   11750 /*
   11751  * wm_gmii_i82544_writereg:	[mii interface function]
   11752  *
   11753  *	Write a PHY register on the GMII.
   11754  */
   11755 static int
   11756 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11757 {
   11758 	struct wm_softc *sc = device_private(dev);
   11759 	int rv;
   11760 
   11761 	rv = sc->phy.acquire(sc);
   11762 	if (rv != 0) {
   11763 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11764 		return rv;
   11765 	}
   11766 
    11767 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
   11768 	sc->phy.release(sc);
   11769 
   11770 	return rv;
   11771 }
   11772 
   11773 static int
   11774 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11775 {
   11776 	struct wm_softc *sc = device_private(dev);
   11777 	int rv;
   11778 
   11779 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11780 		switch (sc->sc_phytype) {
   11781 		case WMPHY_IGP:
   11782 		case WMPHY_IGP_2:
   11783 		case WMPHY_IGP_3:
   11784 			rv = wm_gmii_mdic_writereg(dev, phy,
   11785 			    IGPHY_PAGE_SELECT, reg);
   11786 			if (rv != 0)
   11787 				return rv;
   11788 			break;
   11789 		default:
   11790 #ifdef WM_DEBUG
   11791 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11792 			    __func__, sc->sc_phytype, reg);
   11793 #endif
   11794 			break;
   11795 		}
   11796 	}
   11797 
   11798 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11799 }
   11800 
   11801 /*
   11802  * wm_gmii_i80003_readreg:	[mii interface function]
   11803  *
    11804  *	Read a PHY register on the kumeran bus.
   11805  * This could be handled by the PHY layer if we didn't have to lock the
   11806  * resource ...
   11807  */
   11808 static int
   11809 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11810 {
   11811 	struct wm_softc *sc = device_private(dev);
   11812 	int page_select;
   11813 	uint16_t temp, temp2;
   11814 	int rv;
   11815 
   11816 	if (phy != 1) /* Only one PHY on kumeran bus */
   11817 		return -1;
   11818 
   11819 	rv = sc->phy.acquire(sc);
   11820 	if (rv != 0) {
   11821 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11822 		return rv;
   11823 	}
   11824 
   11825 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11826 		page_select = GG82563_PHY_PAGE_SELECT;
   11827 	else {
   11828 		/*
   11829 		 * Use Alternative Page Select register to access registers
   11830 		 * 30 and 31.
   11831 		 */
   11832 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11833 	}
   11834 	temp = reg >> GG82563_PAGE_SHIFT;
   11835 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11836 		goto out;
   11837 
   11838 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11839 		/*
    11840 		 * Wait another 200us to work around a bug with the ready
    11841 		 * bit in the MDIC register.
   11842 		 */
   11843 		delay(200);
   11844 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11845 		if ((rv != 0) || (temp2 != temp)) {
   11846 			device_printf(dev, "%s failed\n", __func__);
   11847 			rv = -1;
   11848 			goto out;
   11849 		}
   11850 		delay(200);
   11851 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11852 		delay(200);
   11853 	} else
   11854 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11855 
   11856 out:
   11857 	sc->phy.release(sc);
   11858 	return rv;
   11859 }
   11860 
   11861 /*
   11862  * wm_gmii_i80003_writereg:	[mii interface function]
   11863  *
    11864  *	Write a PHY register on the kumeran bus.
   11865  * This could be handled by the PHY layer if we didn't have to lock the
   11866  * resource ...
   11867  */
   11868 static int
   11869 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11870 {
   11871 	struct wm_softc *sc = device_private(dev);
   11872 	int page_select, rv;
   11873 	uint16_t temp, temp2;
   11874 
   11875 	if (phy != 1) /* Only one PHY on kumeran bus */
   11876 		return -1;
   11877 
   11878 	rv = sc->phy.acquire(sc);
   11879 	if (rv != 0) {
   11880 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11881 		return rv;
   11882 	}
   11883 
   11884 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11885 		page_select = GG82563_PHY_PAGE_SELECT;
   11886 	else {
   11887 		/*
   11888 		 * Use Alternative Page Select register to access registers
   11889 		 * 30 and 31.
   11890 		 */
   11891 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11892 	}
   11893 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11894 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11895 		goto out;
   11896 
   11897 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11898 		/*
    11899 		 * Wait another 200us to work around a bug with the ready
    11900 		 * bit in the MDIC register.
   11901 		 */
   11902 		delay(200);
   11903 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11904 		if ((rv != 0) || (temp2 != temp)) {
   11905 			device_printf(dev, "%s failed\n", __func__);
   11906 			rv = -1;
   11907 			goto out;
   11908 		}
   11909 		delay(200);
   11910 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11911 		delay(200);
   11912 	} else
   11913 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11914 
   11915 out:
   11916 	sc->phy.release(sc);
   11917 	return rv;
   11918 }
   11919 
   11920 /*
   11921  * wm_gmii_bm_readreg:	[mii interface function]
   11922  *
    11923  *	Read a PHY register on the BM PHY.
   11924  * This could be handled by the PHY layer if we didn't have to lock the
   11925  * resource ...
   11926  */
   11927 static int
   11928 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11929 {
   11930 	struct wm_softc *sc = device_private(dev);
   11931 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11932 	int rv;
   11933 
   11934 	rv = sc->phy.acquire(sc);
   11935 	if (rv != 0) {
   11936 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11937 		return rv;
   11938 	}
   11939 
   11940 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11941 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11942 		    || (reg == 31)) ? 1 : phy;
   11943 	/* Page 800 works differently than the rest so it has its own func */
   11944 	if (page == BM_WUC_PAGE) {
   11945 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11946 		goto release;
   11947 	}
   11948 
   11949 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11950 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11951 		    && (sc->sc_type != WM_T_82583))
   11952 			rv = wm_gmii_mdic_writereg(dev, phy,
   11953 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11954 		else
   11955 			rv = wm_gmii_mdic_writereg(dev, phy,
   11956 			    BME1000_PHY_PAGE_SELECT, page);
   11957 		if (rv != 0)
   11958 			goto release;
   11959 	}
   11960 
   11961 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11962 
   11963 release:
   11964 	sc->phy.release(sc);
   11965 	return rv;
   11966 }
   11967 
   11968 /*
   11969  * wm_gmii_bm_writereg:	[mii interface function]
   11970  *
    11971  *	Write a PHY register on the BM PHY.
   11972  * This could be handled by the PHY layer if we didn't have to lock the
   11973  * resource ...
   11974  */
   11975 static int
   11976 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11977 {
   11978 	struct wm_softc *sc = device_private(dev);
   11979 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11980 	int rv;
   11981 
   11982 	rv = sc->phy.acquire(sc);
   11983 	if (rv != 0) {
   11984 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11985 		return rv;
   11986 	}
   11987 
   11988 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11989 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11990 		    || (reg == 31)) ? 1 : phy;
   11991 	/* Page 800 works differently than the rest so it has its own func */
   11992 	if (page == BM_WUC_PAGE) {
   11993 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11994 		goto release;
   11995 	}
   11996 
   11997 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11998 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11999 		    && (sc->sc_type != WM_T_82583))
   12000 			rv = wm_gmii_mdic_writereg(dev, phy,
   12001 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12002 		else
   12003 			rv = wm_gmii_mdic_writereg(dev, phy,
   12004 			    BME1000_PHY_PAGE_SELECT, page);
   12005 		if (rv != 0)
   12006 			goto release;
   12007 	}
   12008 
   12009 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12010 
   12011 release:
   12012 	sc->phy.release(sc);
   12013 	return rv;
   12014 }
   12015 
   12016 /*
   12017  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   12018  *  @dev: pointer to the HW structure
    12019  *  @phy_regp: pointer to store the original contents of BM_WUC_ENABLE_REG
   12020  *
    12021  *  Assumes the semaphore is already acquired and that phy_regp points to
    12022  *  a valid memory address to store the contents of BM_WUC_ENABLE_REG.
   12023  */
   12024 static int
   12025 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12026 {
   12027 #ifdef WM_DEBUG
   12028 	struct wm_softc *sc = device_private(dev);
   12029 #endif
   12030 	uint16_t temp;
   12031 	int rv;
   12032 
   12033 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12034 		device_xname(dev), __func__));
   12035 
   12036 	if (!phy_regp)
   12037 		return -1;
   12038 
   12039 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12040 
   12041 	/* Select Port Control Registers page */
   12042 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12043 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12044 	if (rv != 0)
   12045 		return rv;
   12046 
   12047 	/* Read WUCE and save it */
   12048 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12049 	if (rv != 0)
   12050 		return rv;
   12051 
   12052 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12053 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12054 	 */
   12055 	temp = *phy_regp;
   12056 	temp |= BM_WUC_ENABLE_BIT;
   12057 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12058 
   12059 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12060 		return rv;
   12061 
   12062 	/* Select Host Wakeup Registers page - caller now able to write
   12063 	 * registers on the Wakeup registers page
   12064 	 */
   12065 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12066 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12067 }
   12068 
   12069 /*
   12070  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12071  *  @dev: pointer to the HW structure
    12072  *  @phy_regp: pointer to the original contents of BM_WUC_ENABLE_REG
   12073  *
   12074  *  Restore BM_WUC_ENABLE_REG to its original value.
   12075  *
    12076  *  Assumes the semaphore is already acquired and *phy_regp holds the
    12077  *  contents of BM_WUC_ENABLE_REG before registers on BM_WUC_PAGE were
    12078  *  accessed by the caller.
   12079  */
   12080 static int
   12081 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12082 {
   12083 #ifdef WM_DEBUG
   12084 	struct wm_softc *sc = device_private(dev);
   12085 #endif
   12086 
   12087 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12088 		device_xname(dev), __func__));
   12089 
   12090 	if (!phy_regp)
   12091 		return -1;
   12092 
   12093 	/* Select Port Control Registers page */
   12094 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12095 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12096 
   12097 	/* Restore 769.17 to its original value */
   12098 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12099 
   12100 	return 0;
   12101 }
   12102 
   12103 /*
   12104  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    12105  *  @dev: pointer to the HW structure
   12106  *  @offset: register offset to be read or written
   12107  *  @val: pointer to the data to read or write
   12108  *  @rd: determines if operation is read or write
   12109  *  @page_set: BM_WUC_PAGE already set and access enabled
   12110  *
   12111  *  Read the PHY register at offset and store the retrieved information in
    12112  *  data, or write data to the PHY register at offset. Note that the
    12113  *  procedure for accessing the PHY wakeup registers differs from that for
    12114  *  the other PHY registers. It works as follows:
   12115  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    12116  *  2) Set page to 800 for host (801 if accessed from manageability)
   12117  *  3) Write the address using the address opcode (0x11)
   12118  *  4) Read or write the data using the data opcode (0x12)
   12119  *  5) Restore 769.17.2 to its original value
   12120  *
   12121  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12122  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12123  *
   12124  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12125  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   12126  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   12127  */
   12128 static int
   12129 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   12130 	bool page_set)
   12131 {
   12132 	struct wm_softc *sc = device_private(dev);
   12133 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12134 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12135 	uint16_t wuce;
   12136 	int rv = 0;
   12137 
   12138 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12139 		device_xname(dev), __func__));
   12140 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12141 	if ((sc->sc_type == WM_T_PCH)
   12142 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12143 		device_printf(dev,
   12144 		    "Attempting to access page %d while gig enabled.\n", page);
   12145 	}
   12146 
   12147 	if (!page_set) {
   12148 		/* Enable access to PHY wakeup registers */
   12149 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12150 		if (rv != 0) {
   12151 			device_printf(dev,
   12152 			    "%s: Could not enable PHY wakeup reg access\n",
   12153 			    __func__);
   12154 			return rv;
   12155 		}
   12156 	}
   12157 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12158 		device_xname(sc->sc_dev), __func__, page, regnum));
   12159 
   12160 	/*
    12161 	 * Steps 3) and 4): access the PHY wakeup register, following the
    12162 	 * procedure described in the function comment above.
   12163 	 */
   12164 
   12165 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12166 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12167 	if (rv != 0)
   12168 		return rv;
   12169 
   12170 	if (rd) {
   12171 		/* Read the Wakeup register page value using opcode 0x12 */
   12172 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12173 	} else {
   12174 		/* Write the Wakeup register page value using opcode 0x12 */
   12175 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12176 	}
   12177 	if (rv != 0)
   12178 		return rv;
   12179 
   12180 	if (!page_set)
   12181 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12182 
   12183 	return rv;
   12184 }
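
/*
 * A minimal sketch (not compiled) of reading one wakeup-page register with
 * the helpers above when the caller manages the page itself (page_set ==
 * true). The BM_PHY_REG() composition macro is assumed to exist as the
 * inverse of the BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() decoders used above,
 * and the register number 1 is an arbitrary example.
 */
#if 0
	uint16_t wuce;
	int16_t val;

	if (sc->phy.acquire(sc) == 0) {
		if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) == 0) {
			wm_access_phy_wakeup_reg_bm(dev,
			    BM_PHY_REG(BM_WUC_PAGE, 1), &val, true, true);
			wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
		}
		sc->phy.release(sc);
	}
#endif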
   12185 
   12186 /*
   12187  * wm_gmii_hv_readreg:	[mii interface function]
   12188  *
    12189  *	Read a PHY register on the HV PHY (PCH and newer).
   12190  * This could be handled by the PHY layer if we didn't have to lock the
   12191  * resource ...
   12192  */
   12193 static int
   12194 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12195 {
   12196 	struct wm_softc *sc = device_private(dev);
   12197 	int rv;
   12198 
   12199 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12200 		device_xname(dev), __func__));
   12201 
   12202 	rv = sc->phy.acquire(sc);
   12203 	if (rv != 0) {
   12204 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12205 		return rv;
   12206 	}
   12207 
   12208 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12209 	sc->phy.release(sc);
   12210 	return rv;
   12211 }
   12212 
   12213 static int
   12214 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12215 {
   12216 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12217 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12218 	int rv;
   12219 
   12220 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12221 
   12222 	/* Page 800 works differently than the rest so it has its own func */
   12223 	if (page == BM_WUC_PAGE)
   12224 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12225 
   12226 	/*
    12227 	 * Pages below 768 (other than page 0) work differently than the
    12228 	 * rest and are not handled here.
   12229 	 */
   12230 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12231 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12232 		return -1;
   12233 	}
   12234 
   12235 	/*
   12236 	 * XXX I21[789] documents say that the SMBus Address register is at
   12237 	 * PHY address 01, Page 0 (not 768), Register 26.
   12238 	 */
   12239 	if (page == HV_INTC_FC_PAGE_START)
   12240 		page = 0;
   12241 
   12242 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12243 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12244 		    page << BME1000_PAGE_SHIFT);
   12245 		if (rv != 0)
   12246 			return rv;
   12247 	}
   12248 
   12249 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12250 }
   12251 
   12252 /*
   12253  * wm_gmii_hv_writereg:	[mii interface function]
   12254  *
    12255  *	Write a PHY register on the HV PHY (PCH and newer).
   12256  * This could be handled by the PHY layer if we didn't have to lock the
   12257  * resource ...
   12258  */
   12259 static int
   12260 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12261 {
   12262 	struct wm_softc *sc = device_private(dev);
   12263 	int rv;
   12264 
   12265 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12266 		device_xname(dev), __func__));
   12267 
   12268 	rv = sc->phy.acquire(sc);
   12269 	if (rv != 0) {
   12270 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12271 		return rv;
   12272 	}
   12273 
   12274 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12275 	sc->phy.release(sc);
   12276 
   12277 	return rv;
   12278 }
   12279 
   12280 static int
   12281 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12282 {
   12283 	struct wm_softc *sc = device_private(dev);
   12284 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12285 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12286 	int rv;
   12287 
   12288 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12289 
   12290 	/* Page 800 works differently than the rest so it has its own func */
   12291 	if (page == BM_WUC_PAGE)
   12292 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12293 		    false);
   12294 
   12295 	/*
    12296 	 * Pages below 768 (other than page 0) work differently than the
    12297 	 * rest and are not handled here.
   12298 	 */
   12299 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12300 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12301 		return -1;
   12302 	}
   12303 
   12304 	{
   12305 		/*
   12306 		 * XXX I21[789] documents say that the SMBus Address register
   12307 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12308 		 */
   12309 		if (page == HV_INTC_FC_PAGE_START)
   12310 			page = 0;
   12311 
   12312 		/*
   12313 		 * XXX Workaround MDIO accesses being disabled after entering
   12314 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12315 		 * register is set)
   12316 		 */
   12317 		if (sc->sc_phytype == WMPHY_82578) {
   12318 			struct mii_softc *child;
   12319 
   12320 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12321 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12322 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12323 			    && ((val & (1 << 11)) != 0)) {
   12324 				device_printf(dev, "XXX need workaround\n");
   12325 			}
   12326 		}
   12327 
   12328 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12329 			rv = wm_gmii_mdic_writereg(dev, 1,
   12330 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12331 			if (rv != 0)
   12332 				return rv;
   12333 		}
   12334 	}
   12335 
   12336 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12337 }
   12338 
   12339 /*
   12340  * wm_gmii_82580_readreg:	[mii interface function]
   12341  *
   12342  *	Read a PHY register on the 82580 and I350.
   12343  * This could be handled by the PHY layer if we didn't have to lock the
   12344  * resource ...
   12345  */
   12346 static int
   12347 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12348 {
   12349 	struct wm_softc *sc = device_private(dev);
   12350 	int rv;
   12351 
   12352 	rv = sc->phy.acquire(sc);
   12353 	if (rv != 0) {
   12354 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12355 		return rv;
   12356 	}
   12357 
   12358 #ifdef DIAGNOSTIC
   12359 	if (reg > MII_ADDRMASK) {
   12360 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12361 		    __func__, sc->sc_phytype, reg);
   12362 		reg &= MII_ADDRMASK;
   12363 	}
   12364 #endif
   12365 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12366 
   12367 	sc->phy.release(sc);
   12368 	return rv;
   12369 }
   12370 
   12371 /*
   12372  * wm_gmii_82580_writereg:	[mii interface function]
   12373  *
   12374  *	Write a PHY register on the 82580 and I350.
   12375  * This could be handled by the PHY layer if we didn't have to lock the
   12376  * resource ...
   12377  */
   12378 static int
   12379 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12380 {
   12381 	struct wm_softc *sc = device_private(dev);
   12382 	int rv;
   12383 
   12384 	rv = sc->phy.acquire(sc);
   12385 	if (rv != 0) {
   12386 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12387 		return rv;
   12388 	}
   12389 
   12390 #ifdef DIAGNOSTIC
   12391 	if (reg > MII_ADDRMASK) {
   12392 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12393 		    __func__, sc->sc_phytype, reg);
   12394 		reg &= MII_ADDRMASK;
   12395 	}
   12396 #endif
   12397 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12398 
   12399 	sc->phy.release(sc);
   12400 	return rv;
   12401 }
   12402 
   12403 /*
   12404  * wm_gmii_gs40g_readreg:	[mii interface function]
   12405  *
    12406  *	Read a PHY register on the I210 and I211.
   12407  * This could be handled by the PHY layer if we didn't have to lock the
   12408  * resource ...
   12409  */
   12410 static int
   12411 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12412 {
   12413 	struct wm_softc *sc = device_private(dev);
   12414 	int page, offset;
   12415 	int rv;
   12416 
   12417 	/* Acquire semaphore */
   12418 	rv = sc->phy.acquire(sc);
   12419 	if (rv != 0) {
   12420 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12421 		return rv;
   12422 	}
   12423 
   12424 	/* Page select */
   12425 	page = reg >> GS40G_PAGE_SHIFT;
   12426 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12427 	if (rv != 0)
   12428 		goto release;
   12429 
   12430 	/* Read reg */
   12431 	offset = reg & GS40G_OFFSET_MASK;
   12432 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12433 
   12434 release:
   12435 	sc->phy.release(sc);
   12436 	return rv;
   12437 }
   12438 
   12439 /*
   12440  * wm_gmii_gs40g_writereg:	[mii interface function]
   12441  *
   12442  *	Write a PHY register on the I210 and I211.
   12443  * This could be handled by the PHY layer if we didn't have to lock the
   12444  * resource ...
   12445  */
   12446 static int
   12447 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12448 {
   12449 	struct wm_softc *sc = device_private(dev);
   12450 	uint16_t page;
   12451 	int offset, rv;
   12452 
   12453 	/* Acquire semaphore */
   12454 	rv = sc->phy.acquire(sc);
   12455 	if (rv != 0) {
   12456 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12457 		return rv;
   12458 	}
   12459 
   12460 	/* Page select */
   12461 	page = reg >> GS40G_PAGE_SHIFT;
   12462 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12463 	if (rv != 0)
   12464 		goto release;
   12465 
   12466 	/* Write reg */
   12467 	offset = reg & GS40G_OFFSET_MASK;
   12468 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12469 
   12470 release:
   12471 	/* Release semaphore */
   12472 	sc->phy.release(sc);
   12473 	return rv;
   12474 }
   12475 
   12476 /*
   12477  * wm_gmii_statchg:	[mii interface function]
   12478  *
   12479  *	Callback from MII layer when media changes.
   12480  */
   12481 static void
   12482 wm_gmii_statchg(struct ifnet *ifp)
   12483 {
   12484 	struct wm_softc *sc = ifp->if_softc;
   12485 	struct mii_data *mii = &sc->sc_mii;
   12486 
   12487 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12488 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12489 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12490 
   12491 	/* Get flow control negotiation result. */
   12492 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12493 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12494 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12495 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12496 	}
   12497 
   12498 	if (sc->sc_flowflags & IFM_FLOW) {
   12499 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12500 			sc->sc_ctrl |= CTRL_TFCE;
   12501 			sc->sc_fcrtl |= FCRTL_XONE;
   12502 		}
   12503 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12504 			sc->sc_ctrl |= CTRL_RFCE;
   12505 	}
   12506 
   12507 	if (mii->mii_media_active & IFM_FDX) {
   12508 		DPRINTF(sc, WM_DEBUG_LINK,
   12509 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12510 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12511 	} else {
   12512 		DPRINTF(sc, WM_DEBUG_LINK,
   12513 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12514 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12515 	}
   12516 
   12517 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12518 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12519 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   12520 						 : WMREG_FCRTL, sc->sc_fcrtl);
   12521 	if (sc->sc_type == WM_T_80003) {
   12522 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12523 		case IFM_1000_T:
   12524 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12525 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    12526 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   12527 			break;
   12528 		default:
   12529 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12530 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    12531 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12532 			break;
   12533 		}
   12534 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12535 	}
   12536 }
   12537 
   12538 /* kumeran related (80003, ICH* and PCH*) */
   12539 
   12540 /*
   12541  * wm_kmrn_readreg:
   12542  *
   12543  *	Read a kumeran register
   12544  */
   12545 static int
   12546 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12547 {
   12548 	int rv;
   12549 
   12550 	if (sc->sc_type == WM_T_80003)
   12551 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12552 	else
   12553 		rv = sc->phy.acquire(sc);
   12554 	if (rv != 0) {
   12555 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12556 		    __func__);
   12557 		return rv;
   12558 	}
   12559 
   12560 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12561 
   12562 	if (sc->sc_type == WM_T_80003)
   12563 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12564 	else
   12565 		sc->phy.release(sc);
   12566 
   12567 	return rv;
   12568 }
   12569 
   12570 static int
   12571 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12572 {
   12573 
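          	/*
          	 * Kumeran registers are accessed indirectly through
          	 * KUMCTRLSTA: write the register offset with the read
          	 * enable (REN) bit set, wait for the hardware to latch the
          	 * data, then read the result back from the data field of
          	 * the same register.
          	 */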
   12574 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12575 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12576 	    KUMCTRLSTA_REN);
   12577 	CSR_WRITE_FLUSH(sc);
   12578 	delay(2);
   12579 
   12580 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12581 
   12582 	return 0;
   12583 }
   12584 
   12585 /*
   12586  * wm_kmrn_writereg:
   12587  *
   12588  *	Write a kumeran register
   12589  */
   12590 static int
   12591 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12592 {
   12593 	int rv;
   12594 
   12595 	if (sc->sc_type == WM_T_80003)
   12596 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12597 	else
   12598 		rv = sc->phy.acquire(sc);
   12599 	if (rv != 0) {
   12600 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12601 		    __func__);
   12602 		return rv;
   12603 	}
   12604 
   12605 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12606 
   12607 	if (sc->sc_type == WM_T_80003)
   12608 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12609 	else
   12610 		sc->phy.release(sc);
   12611 
   12612 	return rv;
   12613 }
   12614 
   12615 static int
   12616 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12617 {
   12618 
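          	/*
          	 * A Kumeran write is a single cycle: the register offset
          	 * and the 16-bit data are written together, with REN left
          	 * clear.
          	 */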
   12619 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12620 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12621 
   12622 	return 0;
   12623 }
   12624 
   12625 /*
   12626  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12627  * This access method is different from IEEE MMD.
   12628  */
   12629 static int
   12630 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12631 {
   12632 	struct wm_softc *sc = device_private(dev);
   12633 	int rv;
   12634 
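          	/*
          	 * EMI access is indirect: write the target register number
          	 * to the EMI address register, then read or write the EMI
          	 * data register.
          	 */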
   12635 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12636 	if (rv != 0)
   12637 		return rv;
   12638 
   12639 	if (rd)
   12640 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12641 	else
   12642 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12643 	return rv;
   12644 }
   12645 
   12646 static int
   12647 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12648 {
   12649 
   12650 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12651 }
   12652 
   12653 static int
   12654 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12655 {
   12656 
   12657 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12658 }
   12659 
   12660 /* SGMII related */
   12661 
   12662 /*
   12663  * wm_sgmii_uses_mdio
   12664  *
   12665  * Check whether the transaction is to the internal PHY or the external
   12666  * MDIO interface. Return true if it's MDIO.
   12667  */
   12668 static bool
   12669 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12670 {
   12671 	uint32_t reg;
   12672 	bool ismdio = false;
   12673 
   12674 	switch (sc->sc_type) {
   12675 	case WM_T_82575:
   12676 	case WM_T_82576:
   12677 		reg = CSR_READ(sc, WMREG_MDIC);
   12678 		ismdio = ((reg & MDIC_DEST) != 0);
   12679 		break;
   12680 	case WM_T_82580:
   12681 	case WM_T_I350:
   12682 	case WM_T_I354:
   12683 	case WM_T_I210:
   12684 	case WM_T_I211:
   12685 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12686 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12687 		break;
   12688 	default:
   12689 		break;
   12690 	}
   12691 
   12692 	return ismdio;
   12693 }
   12694 
   12695 /* Setup internal SGMII PHY for SFP */
   12696 static void
   12697 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12698 {
   12699 	uint16_t id1, id2, phyreg;
   12700 	int i, rv;
   12701 
   12702 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12703 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12704 		return;
   12705 
   12706 	for (i = 0; i < MII_NPHY; i++) {
   12707 		sc->phy.no_errprint = true;
   12708 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12709 		if (rv != 0)
   12710 			continue;
   12711 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12712 		if (rv != 0)
   12713 			continue;
   12714 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12715 			continue;
   12716 		sc->phy.no_errprint = false;
   12717 
   12718 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12719 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12720 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12721 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12722 		break;
   12723 	}
   12724 
   12726 
   12727 /*
   12728  * wm_sgmii_readreg:	[mii interface function]
   12729  *
   12730  *	Read a PHY register on the SGMII
   12731  * This could be handled by the PHY layer if we didn't have to lock the
   12732  * resource ...
   12733  */
   12734 static int
   12735 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12736 {
   12737 	struct wm_softc *sc = device_private(dev);
   12738 	int rv;
   12739 
   12740 	rv = sc->phy.acquire(sc);
   12741 	if (rv != 0) {
   12742 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12743 		return rv;
   12744 	}
   12745 
   12746 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12747 
   12748 	sc->phy.release(sc);
   12749 	return rv;
   12750 }
   12751 
   12752 static int
   12753 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12754 {
   12755 	struct wm_softc *sc = device_private(dev);
   12756 	uint32_t i2ccmd;
   12757 	int i, rv = 0;
   12758 
   12759 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12760 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12761 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12762 
   12763 	/* Poll the ready bit */
   12764 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12765 		delay(50);
   12766 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12767 		if (i2ccmd & I2CCMD_READY)
   12768 			break;
   12769 	}
   12770 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12771 		device_printf(dev, "I2CCMD Read did not complete\n");
   12772 		rv = ETIMEDOUT;
   12773 	}
   12774 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12775 		if (!sc->phy.no_errprint)
   12776 			device_printf(dev, "I2CCMD Error bit set\n");
   12777 		rv = EIO;
   12778 	}
   12779 
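          	/* Swap the data bytes returned by the I2C interface */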
   12780 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12781 
   12782 	return rv;
   12783 }
   12784 
   12785 /*
   12786  * wm_sgmii_writereg:	[mii interface function]
   12787  *
   12788  *	Write a PHY register on the SGMII.
   12789  * This could be handled by the PHY layer if we didn't have to lock the
   12790  * resource ...
   12791  */
   12792 static int
   12793 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12794 {
   12795 	struct wm_softc *sc = device_private(dev);
   12796 	int rv;
   12797 
   12798 	rv = sc->phy.acquire(sc);
   12799 	if (rv != 0) {
   12800 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12801 		return rv;
   12802 	}
   12803 
   12804 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12805 
   12806 	sc->phy.release(sc);
   12807 
   12808 	return rv;
   12809 }
   12810 
   12811 static int
   12812 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12813 {
   12814 	struct wm_softc *sc = device_private(dev);
   12815 	uint32_t i2ccmd;
   12816 	uint16_t swapdata;
   12817 	int rv = 0;
   12818 	int i;
   12819 
   12820 	/* Swap the data bytes for the I2C interface */
   12821 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12822 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12823 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12824 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12825 
   12826 	/* Poll the ready bit */
   12827 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12828 		delay(50);
   12829 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12830 		if (i2ccmd & I2CCMD_READY)
   12831 			break;
   12832 	}
   12833 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12834 		device_printf(dev, "I2CCMD Write did not complete\n");
   12835 		rv = ETIMEDOUT;
   12836 	}
   12837 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12838 		device_printf(dev, "I2CCMD Error bit set\n");
   12839 		rv = EIO;
   12840 	}
   12841 
   12842 	return rv;
   12843 }
   12844 
   12845 /* TBI related */
   12846 
   12847 static bool
   12848 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12849 {
   12850 	bool sig;
   12851 
   12852 	sig = ctrl & CTRL_SWDPIN(1);
   12853 
   12854 	/*
   12855 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12856 	 * detect a signal, 1 if they don't.
   12857 	 */
   12858 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12859 		sig = !sig;
   12860 
   12861 	return sig;
   12862 }
   12863 
   12864 /*
   12865  * wm_tbi_mediainit:
   12866  *
   12867  *	Initialize media for use on 1000BASE-X devices.
   12868  */
   12869 static void
   12870 wm_tbi_mediainit(struct wm_softc *sc)
   12871 {
   12872 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12873 	const char *sep = "";
   12874 
   12875 	if (sc->sc_type < WM_T_82543)
   12876 		sc->sc_tipg = TIPG_WM_DFLT;
   12877 	else
   12878 		sc->sc_tipg = TIPG_LG_DFLT;
   12879 
   12880 	sc->sc_tbi_serdes_anegticks = 5;
   12881 
   12882 	/* Initialize our media structures */
   12883 	sc->sc_mii.mii_ifp = ifp;
   12884 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12885 
   12886 	ifp->if_baudrate = IF_Gbps(1);
   12887 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12888 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12889 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12890 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12891 		    sc->sc_core_lock);
   12892 	} else {
   12893 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12894 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12895 	}
   12896 
   12897 	/*
   12898 	 * SWD Pins:
   12899 	 *
   12900 	 *	0 = Link LED (output)
   12901 	 *	1 = Loss Of Signal (input)
   12902 	 */
   12903 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12904 
   12905 	/* XXX Perhaps this is only for TBI */
   12906 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12907 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12908 
   12909 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12910 		sc->sc_ctrl &= ~CTRL_LRST;
   12911 
   12912 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12913 
   12914 #define	ADD(ss, mm, dd)							  \
   12915 do {									  \
   12916 	aprint_normal("%s%s", sep, ss);					  \
   12917 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12918 	sep = ", ";							  \
   12919 } while (/*CONSTCOND*/0)
   12920 
   12921 	aprint_normal_dev(sc->sc_dev, "");
   12922 
   12923 	if (sc->sc_type == WM_T_I354) {
   12924 		uint32_t status;
   12925 
   12926 		status = CSR_READ(sc, WMREG_STATUS);
   12927 		if (((status & STATUS_2P5_SKU) != 0)
   12928 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12929 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12930 		} else
   12931 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12932 	} else if (sc->sc_type == WM_T_82545) {
   12933 		/* Only 82545 is LX (XXX except SFP) */
   12934 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12935 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12936 	} else if (sc->sc_sfptype != 0) {
   12937 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12938 		switch (sc->sc_sfptype) {
   12939 		default:
   12940 		case SFF_SFP_ETH_FLAGS_1000SX:
   12941 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12942 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12943 			break;
   12944 		case SFF_SFP_ETH_FLAGS_1000LX:
   12945 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12946 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12947 			break;
   12948 		case SFF_SFP_ETH_FLAGS_1000CX:
   12949 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12950 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12951 			break;
   12952 		case SFF_SFP_ETH_FLAGS_1000T:
   12953 			ADD("1000baseT", IFM_1000_T, 0);
   12954 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12955 			break;
   12956 		case SFF_SFP_ETH_FLAGS_100FX:
   12957 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12958 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12959 			break;
   12960 		}
   12961 	} else {
   12962 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12963 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12964 	}
   12965 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12966 	aprint_normal("\n");
   12967 
   12968 #undef ADD
   12969 
   12970 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12971 }
   12972 
   12973 /*
   12974  * wm_tbi_mediachange:	[ifmedia interface function]
   12975  *
   12976  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12977  */
   12978 static int
   12979 wm_tbi_mediachange(struct ifnet *ifp)
   12980 {
   12981 	struct wm_softc *sc = ifp->if_softc;
   12982 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12983 	uint32_t status, ctrl;
   12984 	bool signal;
   12985 	int i;
   12986 
   12987 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12988 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12989 		/* XXX need some work for >= 82571 and < 82575 */
   12990 		if (sc->sc_type < WM_T_82575)
   12991 			return 0;
   12992 	}
   12993 
   12994 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12995 	    || (sc->sc_type >= WM_T_82575))
   12996 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12997 
   12998 	sc->sc_ctrl &= ~CTRL_LRST;
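          	/*
          	 * Build the TX config word advertised to the link partner:
          	 * enable autonegotiation and advertise the requested duplex.
          	 */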
   12999 	sc->sc_txcw = TXCW_ANE;
   13000 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13001 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   13002 	else if (ife->ifm_media & IFM_FDX)
   13003 		sc->sc_txcw |= TXCW_FD;
   13004 	else
   13005 		sc->sc_txcw |= TXCW_HD;
   13006 
   13007 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   13008 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   13009 
    13010 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   13011 		device_xname(sc->sc_dev), sc->sc_txcw));
   13012 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13013 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13014 	CSR_WRITE_FLUSH(sc);
   13015 	delay(1000);
   13016 
   13017 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13018 	signal = wm_tbi_havesignal(sc, ctrl);
   13019 
   13020 	DPRINTF(sc, WM_DEBUG_LINK,
   13021 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13022 
   13023 	if (signal) {
   13024 		/* Have signal; wait for the link to come up. */
   13025 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13026 			delay(10000);
   13027 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13028 				break;
   13029 		}
   13030 
   13031 		DPRINTF(sc, WM_DEBUG_LINK,
   13032 		    ("%s: i = %d after waiting for link\n",
   13033 			device_xname(sc->sc_dev), i));
   13034 
   13035 		status = CSR_READ(sc, WMREG_STATUS);
   13036 		DPRINTF(sc, WM_DEBUG_LINK,
   13037 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13038 			__PRIxBIT "\n",
   13039 			device_xname(sc->sc_dev), status, STATUS_LU));
   13040 		if (status & STATUS_LU) {
   13041 			/* Link is up. */
   13042 			DPRINTF(sc, WM_DEBUG_LINK,
   13043 			    ("%s: LINK: set media -> link up %s\n",
   13044 				device_xname(sc->sc_dev),
   13045 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13046 
   13047 			/*
    13048 			 * NOTE: The hardware updates TFCE and RFCE in
    13049 			 * CTRL automatically, so re-read CTRL into
    13050 			 * sc->sc_ctrl.
   13050 			 */
   13051 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13052 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13053 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13054 			if (status & STATUS_FD)
   13055 				sc->sc_tctl |=
   13056 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13057 			else
   13058 				sc->sc_tctl |=
   13059 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13060 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13061 				sc->sc_fcrtl |= FCRTL_XONE;
   13062 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13063 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13064 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13065 			sc->sc_tbi_linkup = 1;
   13066 		} else {
   13067 			if (i == WM_LINKUP_TIMEOUT)
   13068 				wm_check_for_link(sc);
   13069 			/* Link is down. */
   13070 			DPRINTF(sc, WM_DEBUG_LINK,
   13071 			    ("%s: LINK: set media -> link down\n",
   13072 				device_xname(sc->sc_dev)));
   13073 			sc->sc_tbi_linkup = 0;
   13074 		}
   13075 	} else {
   13076 		DPRINTF(sc, WM_DEBUG_LINK,
   13077 		    ("%s: LINK: set media -> no signal\n",
   13078 			device_xname(sc->sc_dev)));
   13079 		sc->sc_tbi_linkup = 0;
   13080 	}
   13081 
   13082 	wm_tbi_serdes_set_linkled(sc);
   13083 
   13084 	return 0;
   13085 }
   13086 
   13087 /*
   13088  * wm_tbi_mediastatus:	[ifmedia interface function]
   13089  *
   13090  *	Get the current interface media status on a 1000BASE-X device.
   13091  */
   13092 static void
   13093 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13094 {
   13095 	struct wm_softc *sc = ifp->if_softc;
   13096 	uint32_t ctrl, status;
   13097 
   13098 	ifmr->ifm_status = IFM_AVALID;
   13099 	ifmr->ifm_active = IFM_ETHER;
   13100 
   13101 	status = CSR_READ(sc, WMREG_STATUS);
   13102 	if ((status & STATUS_LU) == 0) {
   13103 		ifmr->ifm_active |= IFM_NONE;
   13104 		return;
   13105 	}
   13106 
   13107 	ifmr->ifm_status |= IFM_ACTIVE;
   13108 	/* Only 82545 is LX */
   13109 	if (sc->sc_type == WM_T_82545)
   13110 		ifmr->ifm_active |= IFM_1000_LX;
   13111 	else
   13112 		ifmr->ifm_active |= IFM_1000_SX;
   13113 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13114 		ifmr->ifm_active |= IFM_FDX;
   13115 	else
   13116 		ifmr->ifm_active |= IFM_HDX;
   13117 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13118 	if (ctrl & CTRL_RFCE)
   13119 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13120 	if (ctrl & CTRL_TFCE)
   13121 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13122 }
   13123 
   13124 /* XXX TBI only */
   13125 static int
   13126 wm_check_for_link(struct wm_softc *sc)
   13127 {
   13128 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13129 	uint32_t rxcw;
   13130 	uint32_t ctrl;
   13131 	uint32_t status;
   13132 	bool signal;
   13133 
   13134 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13135 		device_xname(sc->sc_dev), __func__));
   13136 
   13137 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13138 		/* XXX need some work for >= 82571 */
   13139 		if (sc->sc_type >= WM_T_82571) {
   13140 			sc->sc_tbi_linkup = 1;
   13141 			return 0;
   13142 		}
   13143 	}
   13144 
   13145 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13146 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13147 	status = CSR_READ(sc, WMREG_STATUS);
   13148 	signal = wm_tbi_havesignal(sc, ctrl);
   13149 
   13150 	DPRINTF(sc, WM_DEBUG_LINK,
   13151 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13152 		device_xname(sc->sc_dev), __func__, signal,
   13153 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13154 
   13155 	/*
   13156 	 * SWDPIN   LU RXCW
   13157 	 *	0    0	  0
   13158 	 *	0    0	  1	(should not happen)
   13159 	 *	0    1	  0	(should not happen)
   13160 	 *	0    1	  1	(should not happen)
   13161 	 *	1    0	  0	Disable autonego and force linkup
   13162 	 *	1    0	  1	got /C/ but not linkup yet
   13163 	 *	1    1	  0	(linkup)
   13164 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13165 	 *
   13166 	 */
   13167 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13168 		DPRINTF(sc, WM_DEBUG_LINK,
   13169 		    ("%s: %s: force linkup and fullduplex\n",
   13170 			device_xname(sc->sc_dev), __func__));
   13171 		sc->sc_tbi_linkup = 0;
   13172 		/* Disable auto-negotiation in the TXCW register */
   13173 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13174 
   13175 		/*
   13176 		 * Force link-up and also force full-duplex.
   13177 		 *
    13178 		 * NOTE: The hardware has updated TFCE and RFCE in CTRL
    13179 		 * automatically, so re-read CTRL into sc->sc_ctrl.
   13180 		 */
   13181 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13182 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13183 	} else if (((status & STATUS_LU) != 0)
   13184 	    && ((rxcw & RXCW_C) != 0)
   13185 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13186 		sc->sc_tbi_linkup = 1;
   13187 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13188 			device_xname(sc->sc_dev), __func__));
   13189 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13190 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13191 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   13192 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   13193 			device_xname(sc->sc_dev), __func__));
   13194 	} else {
   13195 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13196 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13197 			status));
   13198 	}
   13199 
   13200 	return 0;
   13201 }
   13202 
   13203 /*
   13204  * wm_tbi_tick:
   13205  *
   13206  *	Check the link on TBI devices.
   13207  *	This function acts as mii_tick().
   13208  */
   13209 static void
   13210 wm_tbi_tick(struct wm_softc *sc)
   13211 {
   13212 	struct mii_data *mii = &sc->sc_mii;
   13213 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13214 	uint32_t status;
   13215 
   13216 	KASSERT(WM_CORE_LOCKED(sc));
   13217 
   13218 	status = CSR_READ(sc, WMREG_STATUS);
   13219 
   13220 	/* XXX is this needed? */
   13221 	(void)CSR_READ(sc, WMREG_RXCW);
   13222 	(void)CSR_READ(sc, WMREG_CTRL);
   13223 
   13224 	/* set link status */
   13225 	if ((status & STATUS_LU) == 0) {
   13226 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13227 			device_xname(sc->sc_dev)));
   13228 		sc->sc_tbi_linkup = 0;
   13229 	} else if (sc->sc_tbi_linkup == 0) {
   13230 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13231 			device_xname(sc->sc_dev),
   13232 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13233 		sc->sc_tbi_linkup = 1;
   13234 		sc->sc_tbi_serdes_ticks = 0;
   13235 	}
   13236 
   13237 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13238 		goto setled;
   13239 
   13240 	if ((status & STATUS_LU) == 0) {
   13241 		sc->sc_tbi_linkup = 0;
   13242 		/* If the timer expired, retry autonegotiation */
   13243 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13244 		    && (++sc->sc_tbi_serdes_ticks
   13245 			>= sc->sc_tbi_serdes_anegticks)) {
   13246 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13247 				device_xname(sc->sc_dev), __func__));
   13248 			sc->sc_tbi_serdes_ticks = 0;
   13249 			/*
   13250 			 * Reset the link, and let autonegotiation do
   13251 			 * its thing
   13252 			 */
   13253 			sc->sc_ctrl |= CTRL_LRST;
   13254 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13255 			CSR_WRITE_FLUSH(sc);
   13256 			delay(1000);
   13257 			sc->sc_ctrl &= ~CTRL_LRST;
   13258 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13259 			CSR_WRITE_FLUSH(sc);
   13260 			delay(1000);
   13261 			CSR_WRITE(sc, WMREG_TXCW,
   13262 			    sc->sc_txcw & ~TXCW_ANE);
   13263 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13264 		}
   13265 	}
   13266 
   13267 setled:
   13268 	wm_tbi_serdes_set_linkled(sc);
   13269 }
   13270 
   13271 /* SERDES related */
   13272 static void
   13273 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13274 {
   13275 	uint32_t reg;
   13276 
   13277 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13278 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13279 		return;
   13280 
   13281 	/* Enable PCS to turn on link */
   13282 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13283 	reg |= PCS_CFG_PCS_EN;
   13284 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13285 
   13286 	/* Power up the laser */
   13287 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13288 	reg &= ~CTRL_EXT_SWDPIN(3);
   13289 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13290 
   13291 	/* Flush the write to verify completion */
   13292 	CSR_WRITE_FLUSH(sc);
   13293 	delay(1000);
   13294 }
   13295 
   13296 static int
   13297 wm_serdes_mediachange(struct ifnet *ifp)
   13298 {
   13299 	struct wm_softc *sc = ifp->if_softc;
   13300 	bool pcs_autoneg = true; /* XXX */
   13301 	uint32_t ctrl_ext, pcs_lctl, reg;
   13302 
   13303 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13304 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13305 		return 0;
   13306 
   13307 	/* XXX Currently, this function is not called on 8257[12] */
   13308 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13309 	    || (sc->sc_type >= WM_T_82575))
   13310 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13311 
   13312 	/* Power on the sfp cage if present */
   13313 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13314 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13315 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13316 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13317 
   13318 	sc->sc_ctrl |= CTRL_SLU;
   13319 
   13320 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13321 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13322 
   13323 		reg = CSR_READ(sc, WMREG_CONNSW);
   13324 		reg |= CONNSW_ENRGSRC;
   13325 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13326 	}
   13327 
   13328 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13329 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13330 	case CTRL_EXT_LINK_MODE_SGMII:
   13331 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13332 		pcs_autoneg = true;
   13333 		/* Autoneg time out should be disabled for SGMII mode */
   13334 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13335 		break;
   13336 	case CTRL_EXT_LINK_MODE_1000KX:
   13337 		pcs_autoneg = false;
   13338 		/* FALLTHROUGH */
   13339 	default:
   13340 		if ((sc->sc_type == WM_T_82575)
   13341 		    || (sc->sc_type == WM_T_82576)) {
   13342 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13343 				pcs_autoneg = false;
   13344 		}
   13345 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13346 		    | CTRL_FRCFDX;
   13347 
   13348 		/* Set speed of 1000/Full if speed/duplex is forced */
   13349 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13350 	}
   13351 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13352 
   13353 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13354 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13355 
   13356 	if (pcs_autoneg) {
   13357 		/* Set PCS register for autoneg */
   13358 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13359 
   13360 		/* Disable force flow control for autoneg */
   13361 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13362 
   13363 		/* Configure flow control advertisement for autoneg */
   13364 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13365 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13366 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13367 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13368 	} else
   13369 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13370 
   13371 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13372 
   13373 	return 0;
   13374 }
   13375 
   13376 static void
   13377 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13378 {
   13379 	struct wm_softc *sc = ifp->if_softc;
   13380 	struct mii_data *mii = &sc->sc_mii;
   13381 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13382 	uint32_t pcs_adv, pcs_lpab, reg;
   13383 
   13384 	ifmr->ifm_status = IFM_AVALID;
   13385 	ifmr->ifm_active = IFM_ETHER;
   13386 
   13387 	/* Check PCS */
   13388 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13389 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13390 		ifmr->ifm_active |= IFM_NONE;
   13391 		sc->sc_tbi_linkup = 0;
   13392 		goto setled;
   13393 	}
   13394 
   13395 	sc->sc_tbi_linkup = 1;
   13396 	ifmr->ifm_status |= IFM_ACTIVE;
   13397 	if (sc->sc_type == WM_T_I354) {
   13398 		uint32_t status;
   13399 
   13400 		status = CSR_READ(sc, WMREG_STATUS);
   13401 		if (((status & STATUS_2P5_SKU) != 0)
   13402 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13403 			ifmr->ifm_active |= IFM_2500_KX;
   13404 		} else
   13405 			ifmr->ifm_active |= IFM_1000_KX;
   13406 	} else {
   13407 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13408 		case PCS_LSTS_SPEED_10:
   13409 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13410 			break;
   13411 		case PCS_LSTS_SPEED_100:
   13412 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13413 			break;
   13414 		case PCS_LSTS_SPEED_1000:
   13415 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13416 			break;
   13417 		default:
   13418 			device_printf(sc->sc_dev, "Unknown speed\n");
   13419 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13420 			break;
   13421 		}
   13422 	}
   13423 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13424 	if ((reg & PCS_LSTS_FDX) != 0)
   13425 		ifmr->ifm_active |= IFM_FDX;
   13426 	else
   13427 		ifmr->ifm_active |= IFM_HDX;
   13428 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13429 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13430 		/* Check flow */
   13431 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13432 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13433 			DPRINTF(sc, WM_DEBUG_LINK,
   13434 			    ("XXX LINKOK but not ACOMP\n"));
   13435 			goto setled;
   13436 		}
   13437 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13438 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13439 		DPRINTF(sc, WM_DEBUG_LINK,
   13440 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
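          		/*
          		 * Resolve the pause mode (cf. IEEE 802.3 Annex 28B
          		 * pause resolution): symmetric pause if both sides
          		 * advertise SYM; otherwise asymmetric pause in the
          		 * direction where one side advertises ASYM only and
          		 * the partner advertises SYM plus ASYM.
          		 */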
   13441 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13442 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13443 			mii->mii_media_active |= IFM_FLOW
   13444 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13445 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13446 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13447 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13448 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13449 			mii->mii_media_active |= IFM_FLOW
   13450 			    | IFM_ETH_TXPAUSE;
   13451 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13452 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13453 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13454 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13455 			mii->mii_media_active |= IFM_FLOW
   13456 			    | IFM_ETH_RXPAUSE;
   13457 		}
   13458 	}
   13459 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13460 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13461 setled:
   13462 	wm_tbi_serdes_set_linkled(sc);
   13463 }
   13464 
   13465 /*
   13466  * wm_serdes_tick:
   13467  *
   13468  *	Check the link on serdes devices.
   13469  */
   13470 static void
   13471 wm_serdes_tick(struct wm_softc *sc)
   13472 {
   13473 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13474 	struct mii_data *mii = &sc->sc_mii;
   13475 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13476 	uint32_t reg;
   13477 
   13478 	KASSERT(WM_CORE_LOCKED(sc));
   13479 
   13480 	mii->mii_media_status = IFM_AVALID;
   13481 	mii->mii_media_active = IFM_ETHER;
   13482 
   13483 	/* Check PCS */
   13484 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13485 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13486 		mii->mii_media_status |= IFM_ACTIVE;
   13487 		sc->sc_tbi_linkup = 1;
   13488 		sc->sc_tbi_serdes_ticks = 0;
   13489 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13490 		if ((reg & PCS_LSTS_FDX) != 0)
   13491 			mii->mii_media_active |= IFM_FDX;
   13492 		else
   13493 			mii->mii_media_active |= IFM_HDX;
   13494 	} else {
   13495 		mii->mii_media_status |= IFM_NONE;
   13496 		sc->sc_tbi_linkup = 0;
   13497 		/* If the timer expired, retry autonegotiation */
   13498 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13499 		    && (++sc->sc_tbi_serdes_ticks
   13500 			>= sc->sc_tbi_serdes_anegticks)) {
   13501 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13502 				device_xname(sc->sc_dev), __func__));
   13503 			sc->sc_tbi_serdes_ticks = 0;
   13504 			/* XXX */
   13505 			wm_serdes_mediachange(ifp);
   13506 		}
   13507 	}
   13508 
   13509 	wm_tbi_serdes_set_linkled(sc);
   13510 }
   13511 
   13512 /* SFP related */
   13513 
   13514 static int
   13515 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13516 {
   13517 	uint32_t i2ccmd;
   13518 	int i;
   13519 
   13520 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13521 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13522 
   13523 	/* Poll the ready bit */
   13524 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13525 		delay(50);
   13526 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13527 		if (i2ccmd & I2CCMD_READY)
   13528 			break;
   13529 	}
   13530 	if ((i2ccmd & I2CCMD_READY) == 0)
   13531 		return -1;
   13532 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13533 		return -1;
   13534 
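          	/* The byte read is returned in the low 8 bits of I2CCMD */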
   13535 	*data = i2ccmd & 0x00ff;
   13536 
   13537 	return 0;
   13538 }
   13539 
   13540 static uint32_t
   13541 wm_sfp_get_media_type(struct wm_softc *sc)
   13542 {
   13543 	uint32_t ctrl_ext;
   13544 	uint8_t val = 0;
   13545 	int timeout = 3;
   13546 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13547 	int rv = -1;
   13548 
   13549 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13550 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13551 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13552 	CSR_WRITE_FLUSH(sc);
   13553 
   13554 	/* Read SFP module data */
   13555 	while (timeout) {
   13556 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13557 		if (rv == 0)
   13558 			break;
   13559 		delay(100*1000); /* XXX too big */
   13560 		timeout--;
   13561 	}
   13562 	if (rv != 0)
   13563 		goto out;
   13564 
   13565 	switch (val) {
   13566 	case SFF_SFP_ID_SFF:
   13567 		aprint_normal_dev(sc->sc_dev,
   13568 		    "Module/Connector soldered to board\n");
   13569 		break;
   13570 	case SFF_SFP_ID_SFP:
   13571 		sc->sc_flags |= WM_F_SFP;
   13572 		break;
   13573 	case SFF_SFP_ID_UNKNOWN:
   13574 		goto out;
   13575 	default:
   13576 		break;
   13577 	}
   13578 
   13579 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13580 	if (rv != 0)
   13581 		goto out;
   13582 
   13583 	sc->sc_sfptype = val;
   13584 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13585 		mediatype = WM_MEDIATYPE_SERDES;
   13586 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13587 		sc->sc_flags |= WM_F_SGMII;
   13588 		mediatype = WM_MEDIATYPE_COPPER;
   13589 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13590 		sc->sc_flags |= WM_F_SGMII;
   13591 		mediatype = WM_MEDIATYPE_SERDES;
   13592 	} else {
   13593 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13594 		    __func__, sc->sc_sfptype);
   13595 		sc->sc_sfptype = 0; /* XXX unknown */
   13596 	}
   13597 
   13598 out:
   13599 	/* Restore I2C interface setting */
   13600 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13601 
   13602 	return mediatype;
   13603 }
   13604 
   13605 /*
   13606  * NVM related.
   13607  * Microwire, SPI (w/wo EERD) and Flash.
   13608  */
   13609 
   13610 /* Both spi and uwire */
   13611 
   13612 /*
   13613  * wm_eeprom_sendbits:
   13614  *
   13615  *	Send a series of bits to the EEPROM.
   13616  */
   13617 static void
   13618 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13619 {
   13620 	uint32_t reg;
   13621 	int x;
   13622 
   13623 	reg = CSR_READ(sc, WMREG_EECD);
   13624 
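          	/* Bit-bang MSB first: put each bit on DI, then pulse SK */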
   13625 	for (x = nbits; x > 0; x--) {
   13626 		if (bits & (1U << (x - 1)))
   13627 			reg |= EECD_DI;
   13628 		else
   13629 			reg &= ~EECD_DI;
   13630 		CSR_WRITE(sc, WMREG_EECD, reg);
   13631 		CSR_WRITE_FLUSH(sc);
   13632 		delay(2);
   13633 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13634 		CSR_WRITE_FLUSH(sc);
   13635 		delay(2);
   13636 		CSR_WRITE(sc, WMREG_EECD, reg);
   13637 		CSR_WRITE_FLUSH(sc);
   13638 		delay(2);
   13639 	}
   13640 }
   13641 
   13642 /*
   13643  * wm_eeprom_recvbits:
   13644  *
   13645  *	Receive a series of bits from the EEPROM.
   13646  */
   13647 static void
   13648 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13649 {
   13650 	uint32_t reg, val;
   13651 	int x;
   13652 
   13653 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13654 
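          	/* Clock each bit out MSB first: raise SK, sample DO, drop SK */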
   13655 	val = 0;
   13656 	for (x = nbits; x > 0; x--) {
   13657 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13658 		CSR_WRITE_FLUSH(sc);
   13659 		delay(2);
   13660 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13661 			val |= (1U << (x - 1));
   13662 		CSR_WRITE(sc, WMREG_EECD, reg);
   13663 		CSR_WRITE_FLUSH(sc);
   13664 		delay(2);
   13665 	}
   13666 	*valp = val;
   13667 }
   13668 
   13669 /* Microwire */
   13670 
   13671 /*
   13672  * wm_nvm_read_uwire:
   13673  *
   13674  *	Read a word from the EEPROM using the MicroWire protocol.
   13675  */
   13676 static int
   13677 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13678 {
   13679 	uint32_t reg, val;
   13680 	int i, rv;
   13681 
   13682 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13683 		device_xname(sc->sc_dev), __func__));
   13684 
   13685 	rv = sc->nvm.acquire(sc);
   13686 	if (rv != 0)
   13687 		return rv;
   13688 
   13689 	for (i = 0; i < wordcnt; i++) {
   13690 		/* Clear SK and DI. */
   13691 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13692 		CSR_WRITE(sc, WMREG_EECD, reg);
   13693 
   13694 		/*
   13695 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13696 		 * and Xen.
   13697 		 *
   13698 		 * We use this workaround only for 82540 because qemu's
   13699 		 * e1000 act as 82540.
   13700 		 */
   13701 		if (sc->sc_type == WM_T_82540) {
   13702 			reg |= EECD_SK;
   13703 			CSR_WRITE(sc, WMREG_EECD, reg);
   13704 			reg &= ~EECD_SK;
   13705 			CSR_WRITE(sc, WMREG_EECD, reg);
   13706 			CSR_WRITE_FLUSH(sc);
   13707 			delay(2);
   13708 		}
   13709 		/* XXX: end of workaround */
   13710 
   13711 		/* Set CHIP SELECT. */
   13712 		reg |= EECD_CS;
   13713 		CSR_WRITE(sc, WMREG_EECD, reg);
   13714 		CSR_WRITE_FLUSH(sc);
   13715 		delay(2);
   13716 
   13717 		/* Shift in the READ command. */
   13718 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13719 
   13720 		/* Shift in address. */
   13721 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13722 
   13723 		/* Shift out the data. */
   13724 		wm_eeprom_recvbits(sc, &val, 16);
   13725 		data[i] = val & 0xffff;
   13726 
   13727 		/* Clear CHIP SELECT. */
   13728 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13729 		CSR_WRITE(sc, WMREG_EECD, reg);
   13730 		CSR_WRITE_FLUSH(sc);
   13731 		delay(2);
   13732 	}
   13733 
   13734 	sc->nvm.release(sc);
   13735 	return 0;
   13736 }
   13737 
   13738 /* SPI */
   13739 
   13740 /*
   13741  * Set SPI and FLASH related information from the EECD register.
   13742  * For 82541 and 82547, the word size is taken from EEPROM.
   13743  */
   13744 static int
   13745 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13746 {
   13747 	int size;
   13748 	uint32_t reg;
   13749 	uint16_t data;
   13750 
   13751 	reg = CSR_READ(sc, WMREG_EECD);
   13752 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13753 
   13754 	/* Read the size of NVM from EECD by default */
   13755 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13756 	switch (sc->sc_type) {
   13757 	case WM_T_82541:
   13758 	case WM_T_82541_2:
   13759 	case WM_T_82547:
   13760 	case WM_T_82547_2:
    13761 		/* Set a dummy word size so the EEPROM can be read */
   13762 		sc->sc_nvm_wordsize = 64;
   13763 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13764 			aprint_error_dev(sc->sc_dev,
   13765 			    "%s: failed to read EEPROM size\n", __func__);
   13766 		}
   13767 		reg = data;
   13768 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13769 		if (size == 0)
    13770 			size = 6; /* 1 << 6 = 64 words */
   13771 		else
   13772 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13773 		break;
   13774 	case WM_T_80003:
   13775 	case WM_T_82571:
   13776 	case WM_T_82572:
   13777 	case WM_T_82573: /* SPI case */
   13778 	case WM_T_82574: /* SPI case */
   13779 	case WM_T_82583: /* SPI case */
   13780 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13781 		if (size > 14)
   13782 			size = 14;
   13783 		break;
   13784 	case WM_T_82575:
   13785 	case WM_T_82576:
   13786 	case WM_T_82580:
   13787 	case WM_T_I350:
   13788 	case WM_T_I354:
   13789 	case WM_T_I210:
   13790 	case WM_T_I211:
   13791 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13792 		if (size > 15)
   13793 			size = 15;
   13794 		break;
   13795 	default:
   13796 		aprint_error_dev(sc->sc_dev,
   13797 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13798 		return -1;
   13799 		break;
   13800 	}
   13801 
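          	/* 'size' is now log2 of the NVM word count */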
   13802 	sc->sc_nvm_wordsize = 1 << size;
   13803 
   13804 	return 0;
   13805 }
   13806 
   13807 /*
   13808  * wm_nvm_ready_spi:
   13809  *
   13810  *	Wait for a SPI EEPROM to be ready for commands.
   13811  */
   13812 static int
   13813 wm_nvm_ready_spi(struct wm_softc *sc)
   13814 {
   13815 	uint32_t val;
   13816 	int usec;
   13817 
   13818 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13819 		device_xname(sc->sc_dev), __func__));
   13820 
   13821 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13822 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13823 		wm_eeprom_recvbits(sc, &val, 8);
   13824 		if ((val & SPI_SR_RDY) == 0)
   13825 			break;
   13826 	}
   13827 	if (usec >= SPI_MAX_RETRIES) {
    13828 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   13829 		return -1;
   13830 	}
   13831 	return 0;
   13832 }
   13833 
   13834 /*
   13835  * wm_nvm_read_spi:
   13836  *
    13837  *	Read a word from the EEPROM using the SPI protocol.
   13838  */
   13839 static int
   13840 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13841 {
   13842 	uint32_t reg, val;
   13843 	int i;
   13844 	uint8_t opc;
   13845 	int rv;
   13846 
   13847 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13848 		device_xname(sc->sc_dev), __func__));
   13849 
   13850 	rv = sc->nvm.acquire(sc);
   13851 	if (rv != 0)
   13852 		return rv;
   13853 
   13854 	/* Clear SK and CS. */
   13855 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13856 	CSR_WRITE(sc, WMREG_EECD, reg);
   13857 	CSR_WRITE_FLUSH(sc);
   13858 	delay(2);
   13859 
   13860 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13861 		goto out;
   13862 
   13863 	/* Toggle CS to flush commands. */
   13864 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13865 	CSR_WRITE_FLUSH(sc);
   13866 	delay(2);
   13867 	CSR_WRITE(sc, WMREG_EECD, reg);
   13868 	CSR_WRITE_FLUSH(sc);
   13869 	delay(2);
   13870 
   13871 	opc = SPI_OPC_READ;
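          	/* Parts with 8 address bits encode address bit 8 in the opcode */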
   13872 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13873 		opc |= SPI_OPC_A8;
   13874 
   13875 	wm_eeprom_sendbits(sc, opc, 8);
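          	/* The EEPROM takes a byte address; each NVM word is two bytes */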
   13876 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13877 
   13878 	for (i = 0; i < wordcnt; i++) {
   13879 		wm_eeprom_recvbits(sc, &val, 16);
   13880 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13881 	}
   13882 
   13883 	/* Raise CS and clear SK. */
   13884 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13885 	CSR_WRITE(sc, WMREG_EECD, reg);
   13886 	CSR_WRITE_FLUSH(sc);
   13887 	delay(2);
   13888 
   13889 out:
   13890 	sc->nvm.release(sc);
   13891 	return rv;
   13892 }
   13893 
   13894 /* Using with EERD */
   13895 
   13896 static int
   13897 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13898 {
   13899 	uint32_t attempts = 100000;
   13900 	uint32_t i, reg = 0;
   13901 	int32_t done = -1;
   13902 
   13903 	for (i = 0; i < attempts; i++) {
   13904 		reg = CSR_READ(sc, rw);
   13905 
   13906 		if (reg & EERD_DONE) {
   13907 			done = 0;
   13908 			break;
   13909 		}
   13910 		delay(5);
   13911 	}
   13912 
   13913 	return done;
   13914 }
   13915 
   13916 static int
   13917 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13918 {
   13919 	int i, eerd = 0;
   13920 	int rv;
   13921 
   13922 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13923 		device_xname(sc->sc_dev), __func__));
   13924 
   13925 	rv = sc->nvm.acquire(sc);
   13926 	if (rv != 0)
   13927 		return rv;
   13928 
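          	/* For each word: start an EERD cycle, poll DONE, then read data */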
   13929 	for (i = 0; i < wordcnt; i++) {
   13930 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13931 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13932 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13933 		if (rv != 0) {
   13934 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13935 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13936 			break;
   13937 		}
   13938 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13939 	}
   13940 
   13941 	sc->nvm.release(sc);
   13942 	return rv;
   13943 }
   13944 
   13945 /* Flash */
   13946 
   13947 static int
   13948 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13949 {
   13950 	uint32_t eecd;
   13951 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13952 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13953 	uint32_t nvm_dword = 0;
   13954 	uint8_t sig_byte = 0;
   13955 	int rv;
   13956 
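          	/*
          	 * Each flash bank begins with a signature word; a bank is
          	 * valid when bits 15:8 of that word, masked with
          	 * ICH_NVM_VALID_SIG_MASK, equal ICH_NVM_SIG_VALUE.
          	 */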
   13957 	switch (sc->sc_type) {
   13958 	case WM_T_PCH_SPT:
   13959 	case WM_T_PCH_CNP:
   13960 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13961 		act_offset = ICH_NVM_SIG_WORD * 2;
   13962 
   13963 		/* Set bank to 0 in case flash read fails. */
   13964 		*bank = 0;
   13965 
   13966 		/* Check bank 0 */
   13967 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13968 		if (rv != 0)
   13969 			return rv;
   13970 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13971 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13972 			*bank = 0;
   13973 			return 0;
   13974 		}
   13975 
   13976 		/* Check bank 1 */
    13977 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13978 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    13979 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13980 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13981 			*bank = 1;
   13982 			return 0;
   13983 		}
   13984 		aprint_error_dev(sc->sc_dev,
   13985 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13986 		return -1;
   13987 	case WM_T_ICH8:
   13988 	case WM_T_ICH9:
   13989 		eecd = CSR_READ(sc, WMREG_EECD);
   13990 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13991 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13992 			return 0;
   13993 		}
   13994 		/* FALLTHROUGH */
   13995 	default:
   13996 		/* Default to 0 */
   13997 		*bank = 0;
   13998 
   13999 		/* Check bank 0 */
   14000 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   14001 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14002 			*bank = 0;
   14003 			return 0;
   14004 		}
   14005 
   14006 		/* Check bank 1 */
   14007 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   14008 		    &sig_byte);
   14009 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14010 			*bank = 1;
   14011 			return 0;
   14012 		}
   14013 	}
   14014 
   14015 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   14016 		device_xname(sc->sc_dev)));
   14017 	return -1;
   14018 }
   14019 
   14020 /******************************************************************************
   14021  * This function does initial flash setup so that a new read/write/erase cycle
   14022  * can be started.
   14023  *
   14024  * sc - The pointer to the hw structure
   14025  ****************************************************************************/
   14026 static int32_t
   14027 wm_ich8_cycle_init(struct wm_softc *sc)
   14028 {
   14029 	uint16_t hsfsts;
   14030 	int32_t error = 1;
   14031 	int32_t i     = 0;
   14032 
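          	/*
          	 * On PCH_SPT and newer, HSFSTS and HSFCTL are the low and
          	 * high halves of a single 32-bit register that only supports
          	 * 32-bit accesses, so the 16-bit values are extracted by
          	 * masking and shifting.
          	 */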
   14033 	if (sc->sc_type >= WM_T_PCH_SPT)
   14034 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14035 	else
   14036 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14037 
    14038 	/* Bail out if the Flash Descriptor Valid bit is not set */
   14039 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14040 		return error;
   14041 
    14042 	/* Clear FCERR and DAEL in HW status by writing them back as 1 */
   14044 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14045 
   14046 	if (sc->sc_type >= WM_T_PCH_SPT)
   14047 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14048 	else
   14049 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14050 
    14051 	/*
    14052 	 * Ideally the hardware would provide a cycle-in-progress bit to
    14053 	 * check against before starting a new cycle, or the FDONE bit
    14054 	 * would be set to 1 after a hardware reset so that it could be
    14055 	 * used to tell whether a cycle is in progress or has completed.
    14056 	 * There should also be a software semaphore guarding FDONE or
    14057 	 * the cycle-in-progress bit, so that accesses from two threads
    14058 	 * are serialized and two threads cannot start a cycle at the
    14059 	 * same time.
    14060 	 */
   14061 
   14062 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14063 		/*
   14064 		 * There is no cycle running at present, so we can start a
   14065 		 * cycle
   14066 		 */
   14067 
   14068 		/* Begin by setting Flash Cycle Done. */
   14069 		hsfsts |= HSFSTS_DONE;
   14070 		if (sc->sc_type >= WM_T_PCH_SPT)
   14071 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14072 			    hsfsts & 0xffffUL);
   14073 		else
   14074 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14075 		error = 0;
   14076 	} else {
   14077 		/*
    14078 		 * Otherwise poll for some time so the current cycle has a
    14079 		 * chance to finish before giving up.
   14080 		 */
   14081 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14082 			if (sc->sc_type >= WM_T_PCH_SPT)
   14083 				hsfsts = ICH8_FLASH_READ32(sc,
   14084 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14085 			else
   14086 				hsfsts = ICH8_FLASH_READ16(sc,
   14087 				    ICH_FLASH_HSFSTS);
   14088 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14089 				error = 0;
   14090 				break;
   14091 			}
   14092 			delay(1);
   14093 		}
   14094 		if (error == 0) {
   14095 			/*
    14096 			 * The previous cycle finished within the timeout;
    14097 			 * now set the Flash Cycle Done bit.
   14098 			 */
   14099 			hsfsts |= HSFSTS_DONE;
   14100 			if (sc->sc_type >= WM_T_PCH_SPT)
   14101 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14102 				    hsfsts & 0xffffUL);
   14103 			else
   14104 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14105 				    hsfsts);
   14106 		}
   14107 	}
   14108 	return error;
   14109 }
   14110 
   14111 /******************************************************************************
   14112  * This function starts a flash cycle and waits for its completion
   14113  *
   14114  * sc - The pointer to the hw structure
   14115  ****************************************************************************/
   14116 static int32_t
   14117 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14118 {
   14119 	uint16_t hsflctl;
   14120 	uint16_t hsfsts;
   14121 	int32_t error = 1;
   14122 	uint32_t i = 0;
   14123 
   14124 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14125 	if (sc->sc_type >= WM_T_PCH_SPT)
   14126 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14127 	else
   14128 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14129 	hsflctl |= HSFCTL_GO;
   14130 	if (sc->sc_type >= WM_T_PCH_SPT)
   14131 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14132 		    (uint32_t)hsflctl << 16);
   14133 	else
   14134 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14135 
   14136 	/* Wait till FDONE bit is set to 1 */
   14137 	do {
   14138 		if (sc->sc_type >= WM_T_PCH_SPT)
   14139 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14140 			    & 0xffffUL;
   14141 		else
   14142 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14143 		if (hsfsts & HSFSTS_DONE)
   14144 			break;
   14145 		delay(1);
   14146 		i++;
   14147 	} while (i < timeout);
    14148 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   14149 		error = 0;
   14150 
   14151 	return error;
   14152 }
   14153 
   14154 /******************************************************************************
   14155  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14156  *
   14157  * sc - The pointer to the hw structure
   14158  * index - The index of the byte or word to read.
    14159  * size - Size of data to read, 1=byte, 2=word, 4=dword
   14160  * data - Pointer to the word to store the value read.
   14161  *****************************************************************************/
   14162 static int32_t
   14163 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14164     uint32_t size, uint32_t *data)
   14165 {
   14166 	uint16_t hsfsts;
   14167 	uint16_t hsflctl;
   14168 	uint32_t flash_linear_address;
   14169 	uint32_t flash_data = 0;
   14170 	int32_t error = 1;
   14171 	int32_t count = 0;
   14172 
    14173 	if (size < 1 || size > 4 || data == NULL ||
   14174 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14175 		return error;
   14176 
   14177 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14178 	    sc->sc_ich8_flash_base;
   14179 
   14180 	do {
   14181 		delay(1);
   14182 		/* Steps */
   14183 		error = wm_ich8_cycle_init(sc);
   14184 		if (error)
   14185 			break;
   14186 
   14187 		if (sc->sc_type >= WM_T_PCH_SPT)
   14188 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14189 			    >> 16;
   14190 		else
   14191 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14192 		/* BCOUNT holds size - 1 (0 = 1 byte ... 3 = 4 bytes) */
   14193 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14194 		    & HSFCTL_BCOUNT_MASK;
   14195 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14196 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14197 			/*
    14198 			 * In SPT, this register is in LAN memory space,
    14199 			 * not flash, so only 32-bit access is supported.
   14200 			 */
   14201 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14202 			    (uint32_t)hsflctl << 16);
   14203 		} else
   14204 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14205 
   14206 		/*
   14207 		 * Write the last 24 bits of index into Flash Linear address
   14208 		 * field in Flash Address
   14209 		 */
   14210 		/* TODO: TBD maybe check the index against the size of flash */
   14211 
   14212 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14213 
   14214 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14215 
    14216 		/*
    14217 		 * If FCERR is set, clear it and retry the whole sequence a
    14218 		 * few more times; otherwise read the result from Flash
    14219 		 * Data0, which holds the data least significant byte
    14220 		 * first.
    14221 		 */
   14222 		if (error == 0) {
   14223 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14224 			if (size == 1)
   14225 				*data = (uint8_t)(flash_data & 0x000000FF);
   14226 			else if (size == 2)
   14227 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14228 			else if (size == 4)
   14229 				*data = (uint32_t)flash_data;
   14230 			break;
   14231 		} else {
    14232 			/*
    14233 			 * If we've gotten here, things are probably
    14234 			 * completely hosed, but if the error condition is
    14235 			 * detected it won't hurt to retry, up to
    14236 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
    14237 			 */
   14238 			if (sc->sc_type >= WM_T_PCH_SPT)
   14239 				hsfsts = ICH8_FLASH_READ32(sc,
   14240 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14241 			else
   14242 				hsfsts = ICH8_FLASH_READ16(sc,
   14243 				    ICH_FLASH_HSFSTS);
   14244 
   14245 			if (hsfsts & HSFSTS_ERR) {
   14246 				/* Repeat for some time before giving up. */
   14247 				continue;
   14248 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14249 				break;
   14250 		}
   14251 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14252 
   14253 	return error;
   14254 }
   14255 
   14256 /******************************************************************************
   14257  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14258  *
   14259  * sc - pointer to wm_hw structure
   14260  * index - The index of the byte to read.
   14261  * data - Pointer to a byte to store the value read.
   14262  *****************************************************************************/
   14263 static int32_t
   14264 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14265 {
   14266 	int32_t status;
   14267 	uint32_t word = 0;
   14268 
   14269 	status = wm_read_ich8_data(sc, index, 1, &word);
   14270 	if (status == 0)
   14271 		*data = (uint8_t)word;
   14272 	else
   14273 		*data = 0;
   14274 
   14275 	return status;
   14276 }
   14277 
   14278 /******************************************************************************
   14279  * Reads a word from the NVM using the ICH8 flash access registers.
   14280  *
   14281  * sc - pointer to wm_hw structure
   14282  * index - The starting byte index of the word to read.
   14283  * data - Pointer to a word to store the value read.
   14284  *****************************************************************************/
   14285 static int32_t
   14286 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14287 {
   14288 	int32_t status;
   14289 	uint32_t word = 0;
   14290 
   14291 	status = wm_read_ich8_data(sc, index, 2, &word);
   14292 	if (status == 0)
   14293 		*data = (uint16_t)word;
   14294 	else
   14295 		*data = 0;
   14296 
   14297 	return status;
   14298 }
   14299 
   14300 /******************************************************************************
   14301  * Reads a dword from the NVM using the ICH8 flash access registers.
   14302  *
   14303  * sc - pointer to wm_hw structure
    14304  * index - The starting byte index of the dword to read.
    14305  * data - Pointer to a dword to store the value read.
   14306  *****************************************************************************/
   14307 static int32_t
   14308 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14309 {
   14310 	int32_t status;
   14311 
   14312 	status = wm_read_ich8_data(sc, index, 4, data);
   14313 	return status;
   14314 }
   14315 
   14316 /******************************************************************************
   14317  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14318  * register.
   14319  *
   14320  * sc - Struct containing variables accessed by shared code
   14321  * offset - offset of word in the EEPROM to read
   14322  * data - word read from the EEPROM
   14323  * words - number of words to read
   14324  *****************************************************************************/
   14325 static int
   14326 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14327 {
   14328 	int rv;
   14329 	uint32_t flash_bank = 0;
   14330 	uint32_t act_offset = 0;
   14331 	uint32_t bank_offset = 0;
   14332 	uint16_t word = 0;
   14333 	uint16_t i = 0;
   14334 
   14335 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14336 		device_xname(sc->sc_dev), __func__));
   14337 
   14338 	rv = sc->nvm.acquire(sc);
   14339 	if (rv != 0)
   14340 		return rv;
   14341 
   14342 	/*
   14343 	 * We need to know which is the valid flash bank.  In the event
   14344 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14345 	 * managing flash_bank. So it cannot be trusted and needs
   14346 	 * to be updated with each read.
   14347 	 */
   14348 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14349 	if (rv) {
   14350 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14351 			device_xname(sc->sc_dev)));
   14352 		flash_bank = 0;
   14353 	}
   14354 
    14355 	/*
    14356 	 * If we're on bank 1, start one bank higher.  The bank size is
    14357 	 * kept in words, so convert it to a byte offset.
    14358 	 */
   14359 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14360 
   14361 	for (i = 0; i < words; i++) {
   14362 		/* The NVM part needs a byte offset, hence * 2 */
   14363 		act_offset = bank_offset + ((offset + i) * 2);
   14364 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14365 		if (rv) {
   14366 			aprint_error_dev(sc->sc_dev,
   14367 			    "%s: failed to read NVM\n", __func__);
   14368 			break;
   14369 		}
   14370 		data[i] = word;
   14371 	}
   14372 
   14373 	sc->nvm.release(sc);
   14374 	return rv;
   14375 }
   14376 
   14377 /******************************************************************************
   14378  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14379  * register.
   14380  *
   14381  * sc - Struct containing variables accessed by shared code
   14382  * offset - offset of word in the EEPROM to read
   14383  * data - word read from the EEPROM
   14384  * words - number of words to read
   14385  *****************************************************************************/
   14386 static int
   14387 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14388 {
   14389 	int	 rv;
   14390 	uint32_t flash_bank = 0;
   14391 	uint32_t act_offset = 0;
   14392 	uint32_t bank_offset = 0;
   14393 	uint32_t dword = 0;
   14394 	uint16_t i = 0;
   14395 
   14396 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14397 		device_xname(sc->sc_dev), __func__));
   14398 
   14399 	rv = sc->nvm.acquire(sc);
   14400 	if (rv != 0)
   14401 		return rv;
   14402 
   14403 	/*
   14404 	 * We need to know which is the valid flash bank.  In the event
   14405 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14406 	 * managing flash_bank. So it cannot be trusted and needs
   14407 	 * to be updated with each read.
   14408 	 */
   14409 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14410 	if (rv) {
   14411 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14412 			device_xname(sc->sc_dev)));
   14413 		flash_bank = 0;
   14414 	}
   14415 
    14416 	/*
    14417 	 * If we're on bank 1, start one bank higher.  The bank size is
    14418 	 * kept in words, so convert it to a byte offset.
    14419 	 */
   14420 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14421 
   14422 	for (i = 0; i < words; i++) {
   14423 		/* The NVM part needs a byte offset, hence * 2 */
   14424 		act_offset = bank_offset + ((offset + i) * 2);
   14425 		/* but we must read dword aligned, so mask ... */
   14426 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14427 		if (rv) {
   14428 			aprint_error_dev(sc->sc_dev,
   14429 			    "%s: failed to read NVM\n", __func__);
   14430 			break;
   14431 		}
   14432 		/* ... and pick out low or high word */
   14433 		if ((act_offset & 0x2) == 0)
   14434 			data[i] = (uint16_t)(dword & 0xFFFF);
   14435 		else
   14436 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14437 	}
   14438 
   14439 	sc->nvm.release(sc);
   14440 	return rv;
   14441 }
   14442 
   14443 /* iNVM */
   14444 
   14445 static int
   14446 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14447 {
    14448 	int32_t	 rv = -1;	/* Fail unless the word is found. */
   14449 	uint32_t invm_dword;
   14450 	uint16_t i;
   14451 	uint8_t record_type, word_address;
   14452 
   14453 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14454 		device_xname(sc->sc_dev), __func__));
   14455 
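	/*
	 * Walk the iNVM one dword at a time.  Each dword starts with a
	 * record type: CSR-autoload and RSA-key records carry fixed-size
	 * payloads that are skipped by advancing the index, while
	 * word-autoload records hold the (address, data) pairs we are
	 * searching for.
	 */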
   14456 	for (i = 0; i < INVM_SIZE; i++) {
   14457 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14458 		/* Get record type */
   14459 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14460 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14461 			break;
   14462 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14463 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14464 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14465 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14466 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14467 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14468 			if (word_address == address) {
   14469 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14470 				rv = 0;
   14471 				break;
   14472 			}
   14473 		}
   14474 	}
   14475 
   14476 	return rv;
   14477 }
   14478 
   14479 static int
   14480 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14481 {
   14482 	int i, rv;
   14483 
   14484 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14485 		device_xname(sc->sc_dev), __func__));
   14486 
   14487 	rv = sc->nvm.acquire(sc);
   14488 	if (rv != 0)
   14489 		return rv;
   14490 
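	/*
	 * Words that are not programmed in the iNVM fall back to the
	 * documented I211 defaults, except for the MAC address words,
	 * which have no sensible default and are reported as an error.
	 */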
   14491 	for (i = 0; i < words; i++) {
   14492 		switch (offset + i) {
   14493 		case NVM_OFF_MACADDR:
   14494 		case NVM_OFF_MACADDR1:
   14495 		case NVM_OFF_MACADDR2:
   14496 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14497 			if (rv != 0) {
   14498 				data[i] = 0xffff;
   14499 				rv = -1;
   14500 			}
   14501 			break;
    14502 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
    14503 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14504 			if (rv != 0) {
    14505 				data[i] = INVM_DEFAULT_AL;
    14506 				rv = 0;
    14507 			}
    14508 			break;
    14509 		case NVM_OFF_CFG2:
    14510 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14511 			if (rv != 0) {
    14512 				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
    14513 				rv = 0;
    14514 			}
    14515 			break;
    14516 		case NVM_OFF_CFG4:
    14517 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14518 			if (rv != 0) {
    14519 				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
    14520 				rv = 0;
    14521 			}
    14522 			break;
    14523 		case NVM_OFF_LED_1_CFG:
    14524 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14525 			if (rv != 0) {
    14526 				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
    14527 				rv = 0;
    14528 			}
    14529 			break;
    14530 		case NVM_OFF_LED_0_2_CFG:
    14531 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14532 			if (rv != 0) {
    14533 				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
    14534 				rv = 0;
    14535 			}
    14536 			break;
    14537 		case NVM_OFF_ID_LED_SETTINGS:
    14538 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14539 			if (rv != 0) {
    14540 				data[i] = ID_LED_RESERVED_FFFF;
    14541 				rv = 0;
    14542 			}
    14543 			break;
    14544 		default:
    14545 			DPRINTF(sc, WM_DEBUG_NVM,
    14546 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    14547 			data[i] = NVM_RESERVED_WORD;
    14548 			break;
   14549 		}
   14550 	}
   14551 
   14552 	sc->nvm.release(sc);
   14553 	return rv;
   14554 }
   14555 
    14556 /* Locking, NVM type detection, checksum validation, versioning and reading */
   14557 
   14558 static int
   14559 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14560 {
   14561 	uint32_t eecd = 0;
   14562 
   14563 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14564 	    || sc->sc_type == WM_T_82583) {
   14565 		eecd = CSR_READ(sc, WMREG_EECD);
   14566 
   14567 		/* Isolate bits 15 & 16 */
   14568 		eecd = ((eecd >> 15) & 0x03);
   14569 
   14570 		/* If both bits are set, device is Flash type */
   14571 		if (eecd == 0x03)
   14572 			return 0;
   14573 	}
   14574 	return 1;
   14575 }
   14576 
   14577 static int
   14578 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14579 {
   14580 	uint32_t eec;
   14581 
   14582 	eec = CSR_READ(sc, WMREG_EEC);
   14583 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14584 		return 1;
   14585 
   14586 	return 0;
   14587 }
   14588 
    14589 /*
    14590  * wm_nvm_validate_checksum
    14591  *
    14592  * The 16-bit sum of the first 64 words must equal NVM_CHECKSUM.
    14593  */
   14594 static int
   14595 wm_nvm_validate_checksum(struct wm_softc *sc)
   14596 {
   14597 	uint16_t checksum;
   14598 	uint16_t eeprom_data;
   14599 #ifdef WM_DEBUG
   14600 	uint16_t csum_wordaddr, valid_checksum;
   14601 #endif
   14602 	int i;
   14603 
   14604 	checksum = 0;
   14605 
   14606 	/* Don't check for I211 */
   14607 	if (sc->sc_type == WM_T_I211)
   14608 		return 0;
   14609 
   14610 #ifdef WM_DEBUG
   14611 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14612 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14613 		csum_wordaddr = NVM_OFF_COMPAT;
   14614 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14615 	} else {
   14616 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14617 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14618 	}
   14619 
   14620 	/* Dump EEPROM image for debug */
   14621 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14622 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14623 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14624 		/* XXX PCH_SPT? */
   14625 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14626 		if ((eeprom_data & valid_checksum) == 0)
   14627 			DPRINTF(sc, WM_DEBUG_NVM,
    14628 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   14629 				device_xname(sc->sc_dev), eeprom_data,
   14630 				    valid_checksum));
   14631 	}
   14632 
   14633 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14634 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14635 		for (i = 0; i < NVM_SIZE; i++) {
   14636 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14637 				printf("XXXX ");
   14638 			else
   14639 				printf("%04hx ", eeprom_data);
   14640 			if (i % 8 == 7)
   14641 				printf("\n");
   14642 		}
   14643 	}
   14644 
   14645 #endif /* WM_DEBUG */
   14646 
   14647 	for (i = 0; i < NVM_SIZE; i++) {
   14648 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14649 			return -1;
   14650 		checksum += eeprom_data;
   14651 	}
   14652 
   14653 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14654 #ifdef WM_DEBUG
   14655 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14656 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14657 #endif
   14658 	}
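	/* Note: a mismatch is only reported (under WM_DEBUG), not fatal. */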
   14659 
   14660 	return 0;
   14661 }
   14662 
   14663 static void
   14664 wm_nvm_version_invm(struct wm_softc *sc)
   14665 {
   14666 	uint32_t dword;
   14667 
   14668 	/*
    14669 	 * Linux's code to decode the version is very strange, so we don't
    14670 	 * follow that algorithm and just use word 61 as the documentation
    14671 	 * describes.  Perhaps it's not perfect, though...
   14672 	 *
   14673 	 * Example:
   14674 	 *
   14675 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14676 	 */
   14677 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14678 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14679 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14680 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14681 }
   14682 
   14683 static void
   14684 wm_nvm_version(struct wm_softc *sc)
   14685 {
   14686 	uint16_t major, minor, build, patch;
   14687 	uint16_t uid0, uid1;
   14688 	uint16_t nvm_data;
   14689 	uint16_t off;
   14690 	bool check_version = false;
   14691 	bool check_optionrom = false;
   14692 	bool have_build = false;
   14693 	bool have_uid = true;
   14694 
   14695 	/*
   14696 	 * Version format:
   14697 	 *
   14698 	 * XYYZ
   14699 	 * X0YZ
   14700 	 * X0YY
   14701 	 *
   14702 	 * Example:
   14703 	 *
   14704 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14705 	 *	82571	0x50a6	5.10.6?
   14706 	 *	82572	0x506a	5.6.10?
   14707 	 *	82572EI	0x5069	5.6.9?
   14708 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14709 	 *		0x2013	2.1.3?
    14710 	 *	82583	0x10a0	1.10.0? (the document says it's the default)
   14711 	 * ICH8+82567	0x0040	0.4.0?
   14712 	 * ICH9+82566	0x1040	1.4.0?
   14713 	 *ICH10+82567	0x0043	0.4.3?
   14714 	 *  PCH+82577	0x00c1	0.12.1?
   14715 	 * PCH2+82579	0x00d3	0.13.3?
   14716 	 *		0x00d4	0.13.4?
   14717 	 *  LPT+I218	0x0023	0.2.3?
   14718 	 *  SPT+I219	0x0084	0.8.4?
   14719 	 *  CNP+I219	0x0054	0.5.4?
   14720 	 */
   14721 
    14722 	/*
    14723 	 * XXX
    14724 	 * QEMU's e1000e (82574L) emulation exposes an SPI ROM of only 64
    14725 	 * words; I've never seen real 82574 hardware with such a small ROM.
    14726 	 */
   14727 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14728 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14729 		have_uid = false;
   14730 
   14731 	switch (sc->sc_type) {
   14732 	case WM_T_82571:
   14733 	case WM_T_82572:
   14734 	case WM_T_82574:
   14735 	case WM_T_82583:
   14736 		check_version = true;
   14737 		check_optionrom = true;
   14738 		have_build = true;
   14739 		break;
   14740 	case WM_T_ICH8:
   14741 	case WM_T_ICH9:
   14742 	case WM_T_ICH10:
   14743 	case WM_T_PCH:
   14744 	case WM_T_PCH2:
   14745 	case WM_T_PCH_LPT:
   14746 	case WM_T_PCH_SPT:
   14747 	case WM_T_PCH_CNP:
   14748 		check_version = true;
   14749 		have_build = true;
   14750 		have_uid = false;
   14751 		break;
   14752 	case WM_T_82575:
   14753 	case WM_T_82576:
   14754 	case WM_T_82580:
   14755 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14756 			check_version = true;
   14757 		break;
   14758 	case WM_T_I211:
   14759 		wm_nvm_version_invm(sc);
   14760 		have_uid = false;
   14761 		goto printver;
   14762 	case WM_T_I210:
   14763 		if (!wm_nvm_flash_presence_i210(sc)) {
   14764 			wm_nvm_version_invm(sc);
   14765 			have_uid = false;
   14766 			goto printver;
   14767 		}
   14768 		/* FALLTHROUGH */
   14769 	case WM_T_I350:
   14770 	case WM_T_I354:
   14771 		check_version = true;
   14772 		check_optionrom = true;
   14773 		break;
   14774 	default:
   14775 		return;
   14776 	}
   14777 	if (check_version
   14778 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14779 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14780 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14781 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14782 			build = nvm_data & NVM_BUILD_MASK;
   14783 			have_build = true;
   14784 		} else
   14785 			minor = nvm_data & 0x00ff;
   14786 
   14787 		/* Decimal */
   14788 		minor = (minor / 16) * 10 + (minor % 16);
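		/* E.g. 0x50a2: major 5, minor 0x0a -> 10, build 2: "5.10.2" */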
   14789 		sc->sc_nvm_ver_major = major;
   14790 		sc->sc_nvm_ver_minor = minor;
   14791 
   14792 printver:
   14793 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14794 		    sc->sc_nvm_ver_minor);
   14795 		if (have_build) {
   14796 			sc->sc_nvm_ver_build = build;
   14797 			aprint_verbose(".%d", build);
   14798 		}
   14799 	}
   14800 
    14801 	/* Assume the Option ROM area starts above NVM_SIZE */
   14802 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14803 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14804 		/* Option ROM Version */
   14805 		if ((off != 0x0000) && (off != 0xffff)) {
   14806 			int rv;
   14807 
   14808 			off += NVM_COMBO_VER_OFF;
   14809 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14810 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14811 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14812 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14813 				/* 16bits */
   14814 				major = uid0 >> 8;
   14815 				build = (uid0 << 8) | (uid1 >> 8);
   14816 				patch = uid1 & 0x00ff;
   14817 				aprint_verbose(", option ROM Version %d.%d.%d",
   14818 				    major, build, patch);
   14819 			}
   14820 		}
   14821 	}
   14822 
   14823 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14824 		aprint_verbose(", Image Unique ID %08x",
   14825 		    ((uint32_t)uid1 << 16) | uid0);
   14826 }
   14827 
   14828 /*
   14829  * wm_nvm_read:
   14830  *
   14831  *	Read data from the serial EEPROM.
   14832  */
   14833 static int
   14834 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14835 {
   14836 	int rv;
   14837 
   14838 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14839 		device_xname(sc->sc_dev), __func__));
   14840 
   14841 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14842 		return -1;
   14843 
   14844 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14845 
   14846 	return rv;
   14847 }
   14848 
   14849 /*
   14850  * Hardware semaphores.
    14851  * Very complex...
   14852  */
   14853 
   14854 static int
   14855 wm_get_null(struct wm_softc *sc)
   14856 {
   14857 
   14858 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14859 		device_xname(sc->sc_dev), __func__));
   14860 	return 0;
   14861 }
   14862 
   14863 static void
   14864 wm_put_null(struct wm_softc *sc)
   14865 {
   14866 
   14867 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14868 		device_xname(sc->sc_dev), __func__));
   14869 	return;
   14870 }
   14871 
   14872 static int
   14873 wm_get_eecd(struct wm_softc *sc)
   14874 {
   14875 	uint32_t reg;
   14876 	int x;
   14877 
   14878 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14879 		device_xname(sc->sc_dev), __func__));
   14880 
   14881 	reg = CSR_READ(sc, WMREG_EECD);
   14882 
   14883 	/* Request EEPROM access. */
   14884 	reg |= EECD_EE_REQ;
   14885 	CSR_WRITE(sc, WMREG_EECD, reg);
   14886 
    14887 	/* ... and wait for it to be granted. */
   14888 	for (x = 0; x < 1000; x++) {
   14889 		reg = CSR_READ(sc, WMREG_EECD);
   14890 		if (reg & EECD_EE_GNT)
   14891 			break;
   14892 		delay(5);
   14893 	}
   14894 	if ((reg & EECD_EE_GNT) == 0) {
   14895 		aprint_error_dev(sc->sc_dev,
   14896 		    "could not acquire EEPROM GNT\n");
   14897 		reg &= ~EECD_EE_REQ;
   14898 		CSR_WRITE(sc, WMREG_EECD, reg);
   14899 		return -1;
   14900 	}
   14901 
   14902 	return 0;
   14903 }
   14904 
   14905 static void
   14906 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14907 {
   14908 
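	/* SPI parts are clocked much faster (1us) than Microwire (50us). */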
   14909 	*eecd |= EECD_SK;
   14910 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14911 	CSR_WRITE_FLUSH(sc);
   14912 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14913 		delay(1);
   14914 	else
   14915 		delay(50);
   14916 }
   14917 
   14918 static void
   14919 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14920 {
   14921 
   14922 	*eecd &= ~EECD_SK;
   14923 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14924 	CSR_WRITE_FLUSH(sc);
   14925 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14926 		delay(1);
   14927 	else
   14928 		delay(50);
   14929 }
   14930 
   14931 static void
   14932 wm_put_eecd(struct wm_softc *sc)
   14933 {
   14934 	uint32_t reg;
   14935 
   14936 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14937 		device_xname(sc->sc_dev), __func__));
   14938 
   14939 	/* Stop nvm */
   14940 	reg = CSR_READ(sc, WMREG_EECD);
   14941 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14942 		/* Pull CS high */
   14943 		reg |= EECD_CS;
   14944 		wm_nvm_eec_clock_lower(sc, &reg);
   14945 	} else {
   14946 		/* CS on Microwire is active-high */
   14947 		reg &= ~(EECD_CS | EECD_DI);
   14948 		CSR_WRITE(sc, WMREG_EECD, reg);
   14949 		wm_nvm_eec_clock_raise(sc, &reg);
   14950 		wm_nvm_eec_clock_lower(sc, &reg);
   14951 	}
   14952 
   14953 	reg = CSR_READ(sc, WMREG_EECD);
   14954 	reg &= ~EECD_EE_REQ;
   14955 	CSR_WRITE(sc, WMREG_EECD, reg);
   14956 
   14957 	return;
   14958 }
   14959 
   14960 /*
   14961  * Get hardware semaphore.
   14962  * Same as e1000_get_hw_semaphore_generic()
   14963  */
   14964 static int
   14965 wm_get_swsm_semaphore(struct wm_softc *sc)
   14966 {
   14967 	int32_t timeout;
   14968 	uint32_t swsm;
   14969 
   14970 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14971 		device_xname(sc->sc_dev), __func__));
   14972 	KASSERT(sc->sc_nvm_wordsize > 0);
   14973 
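	/*
	 * The semaphore is taken in two stages: SMBI arbitrates among
	 * software agents and is owned by whoever reads it as 0, while
	 * SWESMBI arbitrates between software and firmware and is owned
	 * once a write of 1 reads back as 1.
	 */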
   14974 retry:
   14975 	/* Get the SW semaphore. */
   14976 	timeout = sc->sc_nvm_wordsize + 1;
   14977 	while (timeout) {
   14978 		swsm = CSR_READ(sc, WMREG_SWSM);
   14979 
   14980 		if ((swsm & SWSM_SMBI) == 0)
   14981 			break;
   14982 
   14983 		delay(50);
   14984 		timeout--;
   14985 	}
   14986 
   14987 	if (timeout == 0) {
   14988 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14989 			/*
   14990 			 * In rare circumstances, the SW semaphore may already
   14991 			 * be held unintentionally. Clear the semaphore once
   14992 			 * before giving up.
   14993 			 */
   14994 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14995 			wm_put_swsm_semaphore(sc);
   14996 			goto retry;
   14997 		}
   14998 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   14999 		return -1;
   15000 	}
   15001 
   15002 	/* Get the FW semaphore. */
   15003 	timeout = sc->sc_nvm_wordsize + 1;
   15004 	while (timeout) {
   15005 		swsm = CSR_READ(sc, WMREG_SWSM);
   15006 		swsm |= SWSM_SWESMBI;
   15007 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   15008 		/* If we managed to set the bit we got the semaphore. */
   15009 		swsm = CSR_READ(sc, WMREG_SWSM);
   15010 		if (swsm & SWSM_SWESMBI)
   15011 			break;
   15012 
   15013 		delay(50);
   15014 		timeout--;
   15015 	}
   15016 
   15017 	if (timeout == 0) {
   15018 		aprint_error_dev(sc->sc_dev,
   15019 		    "could not acquire SWSM SWESMBI\n");
   15020 		/* Release semaphores */
   15021 		wm_put_swsm_semaphore(sc);
   15022 		return -1;
   15023 	}
   15024 	return 0;
   15025 }
   15026 
   15027 /*
   15028  * Put hardware semaphore.
   15029  * Same as e1000_put_hw_semaphore_generic()
   15030  */
   15031 static void
   15032 wm_put_swsm_semaphore(struct wm_softc *sc)
   15033 {
   15034 	uint32_t swsm;
   15035 
   15036 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15037 		device_xname(sc->sc_dev), __func__));
   15038 
   15039 	swsm = CSR_READ(sc, WMREG_SWSM);
   15040 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15041 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15042 }
   15043 
   15044 /*
   15045  * Get SW/FW semaphore.
   15046  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15047  */
   15048 static int
   15049 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15050 {
   15051 	uint32_t swfw_sync;
   15052 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15053 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15054 	int timeout;
   15055 
   15056 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15057 		device_xname(sc->sc_dev), __func__));
   15058 
   15059 	if (sc->sc_type == WM_T_80003)
   15060 		timeout = 50;
   15061 	else
   15062 		timeout = 200;
   15063 
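	/*
	 * SW_FW_SYNC carries one software-owned and one firmware-owned bit
	 * per resource; a resource is free only when both bits of its pair
	 * are clear.  Access to SW_FW_SYNC itself is serialized with the
	 * SWSM semaphore.
	 */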
   15064 	while (timeout) {
   15065 		if (wm_get_swsm_semaphore(sc)) {
   15066 			aprint_error_dev(sc->sc_dev,
   15067 			    "%s: failed to get semaphore\n",
   15068 			    __func__);
   15069 			return -1;
   15070 		}
   15071 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15072 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15073 			swfw_sync |= swmask;
   15074 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15075 			wm_put_swsm_semaphore(sc);
   15076 			return 0;
   15077 		}
   15078 		wm_put_swsm_semaphore(sc);
   15079 		delay(5000);
   15080 		timeout--;
   15081 	}
   15082 	device_printf(sc->sc_dev,
   15083 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15084 	    mask, swfw_sync);
   15085 	return -1;
   15086 }
   15087 
   15088 static void
   15089 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15090 {
   15091 	uint32_t swfw_sync;
   15092 
   15093 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15094 		device_xname(sc->sc_dev), __func__));
   15095 
   15096 	while (wm_get_swsm_semaphore(sc) != 0)
   15097 		continue;
   15098 
   15099 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15100 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15101 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15102 
   15103 	wm_put_swsm_semaphore(sc);
   15104 }
   15105 
   15106 static int
   15107 wm_get_nvm_80003(struct wm_softc *sc)
   15108 {
   15109 	int rv;
   15110 
   15111 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15112 		device_xname(sc->sc_dev), __func__));
   15113 
   15114 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15115 		aprint_error_dev(sc->sc_dev,
   15116 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15117 		return rv;
   15118 	}
   15119 
   15120 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15121 	    && (rv = wm_get_eecd(sc)) != 0) {
   15122 		aprint_error_dev(sc->sc_dev,
   15123 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15124 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15125 		return rv;
   15126 	}
   15127 
   15128 	return 0;
   15129 }
   15130 
   15131 static void
   15132 wm_put_nvm_80003(struct wm_softc *sc)
   15133 {
   15134 
   15135 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15136 		device_xname(sc->sc_dev), __func__));
   15137 
   15138 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15139 		wm_put_eecd(sc);
   15140 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15141 }
   15142 
   15143 static int
   15144 wm_get_nvm_82571(struct wm_softc *sc)
   15145 {
   15146 	int rv;
   15147 
   15148 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15149 		device_xname(sc->sc_dev), __func__));
   15150 
   15151 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15152 		return rv;
   15153 
   15154 	switch (sc->sc_type) {
   15155 	case WM_T_82573:
   15156 		break;
   15157 	default:
   15158 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15159 			rv = wm_get_eecd(sc);
   15160 		break;
   15161 	}
   15162 
   15163 	if (rv != 0) {
   15164 		aprint_error_dev(sc->sc_dev,
   15165 		    "%s: failed to get semaphore\n",
   15166 		    __func__);
   15167 		wm_put_swsm_semaphore(sc);
   15168 	}
   15169 
   15170 	return rv;
   15171 }
   15172 
   15173 static void
   15174 wm_put_nvm_82571(struct wm_softc *sc)
   15175 {
   15176 
   15177 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15178 		device_xname(sc->sc_dev), __func__));
   15179 
   15180 	switch (sc->sc_type) {
   15181 	case WM_T_82573:
   15182 		break;
   15183 	default:
   15184 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15185 			wm_put_eecd(sc);
   15186 		break;
   15187 	}
   15188 
   15189 	wm_put_swsm_semaphore(sc);
   15190 }
   15191 
   15192 static int
   15193 wm_get_phy_82575(struct wm_softc *sc)
   15194 {
   15195 
   15196 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15197 		device_xname(sc->sc_dev), __func__));
   15198 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15199 }
   15200 
   15201 static void
   15202 wm_put_phy_82575(struct wm_softc *sc)
   15203 {
   15204 
   15205 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15206 		device_xname(sc->sc_dev), __func__));
   15207 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15208 }
   15209 
   15210 static int
   15211 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15212 {
   15213 	uint32_t ext_ctrl;
    15214 	int timeout;
   15215 
   15216 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15217 		device_xname(sc->sc_dev), __func__));
   15218 
   15219 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15220 	for (timeout = 0; timeout < 200; timeout++) {
   15221 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15222 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15223 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15224 
   15225 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15226 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15227 			return 0;
   15228 		delay(5000);
   15229 	}
   15230 	device_printf(sc->sc_dev,
   15231 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15232 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15233 	return -1;
   15234 }
   15235 
   15236 static void
   15237 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15238 {
   15239 	uint32_t ext_ctrl;
   15240 
   15241 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15242 		device_xname(sc->sc_dev), __func__));
   15243 
   15244 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15245 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15246 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15247 
   15248 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15249 }
   15250 
   15251 static int
   15252 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15253 {
   15254 	uint32_t ext_ctrl;
   15255 	int timeout;
   15256 
   15257 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15258 		device_xname(sc->sc_dev), __func__));
   15259 	mutex_enter(sc->sc_ich_phymtx);
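	/*
	 * Wait for any previous owner to drop the SW flag, then claim it by
	 * setting EXTCNFCTR_MDIO_SW_OWNERSHIP and reading it back to check
	 * that the hardware accepted the request.
	 */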
   15260 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15261 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15262 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15263 			break;
   15264 		delay(1000);
   15265 	}
   15266 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15267 		device_printf(sc->sc_dev,
   15268 		    "SW has already locked the resource\n");
   15269 		goto out;
   15270 	}
   15271 
   15272 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15273 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15274 	for (timeout = 0; timeout < 1000; timeout++) {
   15275 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15276 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15277 			break;
   15278 		delay(1000);
   15279 	}
   15280 	if (timeout >= 1000) {
   15281 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15282 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15283 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15284 		goto out;
   15285 	}
   15286 	return 0;
   15287 
   15288 out:
   15289 	mutex_exit(sc->sc_ich_phymtx);
   15290 	return -1;
   15291 }
   15292 
   15293 static void
   15294 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15295 {
   15296 	uint32_t ext_ctrl;
   15297 
   15298 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15299 		device_xname(sc->sc_dev), __func__));
   15300 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15301 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15302 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15303 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15304 	} else
   15305 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15306 
   15307 	mutex_exit(sc->sc_ich_phymtx);
   15308 }
   15309 
   15310 static int
   15311 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15312 {
   15313 
   15314 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15315 		device_xname(sc->sc_dev), __func__));
   15316 	mutex_enter(sc->sc_ich_nvmmtx);
   15317 
   15318 	return 0;
   15319 }
   15320 
   15321 static void
   15322 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15323 {
   15324 
   15325 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15326 		device_xname(sc->sc_dev), __func__));
   15327 	mutex_exit(sc->sc_ich_nvmmtx);
   15328 }
   15329 
   15330 static int
   15331 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15332 {
   15333 	int i = 0;
   15334 	uint32_t reg;
   15335 
   15336 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15337 		device_xname(sc->sc_dev), __func__));
   15338 
   15339 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15340 	do {
   15341 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15342 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15343 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15344 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15345 			break;
   15346 		delay(2*1000);
   15347 		i++;
   15348 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15349 
   15350 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15351 		wm_put_hw_semaphore_82573(sc);
   15352 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15353 		    device_xname(sc->sc_dev));
   15354 		return -1;
   15355 	}
   15356 
   15357 	return 0;
   15358 }
   15359 
   15360 static void
   15361 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15362 {
   15363 	uint32_t reg;
   15364 
   15365 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15366 		device_xname(sc->sc_dev), __func__));
   15367 
   15368 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15369 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15370 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15371 }
   15372 
   15373 /*
   15374  * Management mode and power management related subroutines.
   15375  * BMC, AMT, suspend/resume and EEE.
   15376  */
   15377 
   15378 #ifdef WM_WOL
   15379 static int
   15380 wm_check_mng_mode(struct wm_softc *sc)
   15381 {
   15382 	int rv;
   15383 
   15384 	switch (sc->sc_type) {
   15385 	case WM_T_ICH8:
   15386 	case WM_T_ICH9:
   15387 	case WM_T_ICH10:
   15388 	case WM_T_PCH:
   15389 	case WM_T_PCH2:
   15390 	case WM_T_PCH_LPT:
   15391 	case WM_T_PCH_SPT:
   15392 	case WM_T_PCH_CNP:
   15393 		rv = wm_check_mng_mode_ich8lan(sc);
   15394 		break;
   15395 	case WM_T_82574:
   15396 	case WM_T_82583:
   15397 		rv = wm_check_mng_mode_82574(sc);
   15398 		break;
   15399 	case WM_T_82571:
   15400 	case WM_T_82572:
   15401 	case WM_T_82573:
   15402 	case WM_T_80003:
   15403 		rv = wm_check_mng_mode_generic(sc);
   15404 		break;
   15405 	default:
    15406 		/* Nothing to do */
   15407 		rv = 0;
   15408 		break;
   15409 	}
   15410 
   15411 	return rv;
   15412 }
   15413 
   15414 static int
   15415 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15416 {
   15417 	uint32_t fwsm;
   15418 
   15419 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15420 
   15421 	if (((fwsm & FWSM_FW_VALID) != 0)
   15422 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15423 		return 1;
   15424 
   15425 	return 0;
   15426 }
   15427 
   15428 static int
   15429 wm_check_mng_mode_82574(struct wm_softc *sc)
   15430 {
   15431 	uint16_t data;
   15432 
   15433 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15434 
   15435 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15436 		return 1;
   15437 
   15438 	return 0;
   15439 }
   15440 
   15441 static int
   15442 wm_check_mng_mode_generic(struct wm_softc *sc)
   15443 {
   15444 	uint32_t fwsm;
   15445 
   15446 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15447 
   15448 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15449 		return 1;
   15450 
   15451 	return 0;
   15452 }
   15453 #endif /* WM_WOL */
   15454 
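/*
 * Decide whether management packets must be passed through to the host
 * interface: manageability firmware must be present with TCO receive
 * enabled, and it must be in pass-through rather than ASF-only mode.
 */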
   15455 static int
   15456 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15457 {
   15458 	uint32_t manc, fwsm, factps;
   15459 
   15460 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15461 		return 0;
   15462 
   15463 	manc = CSR_READ(sc, WMREG_MANC);
   15464 
   15465 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15466 		device_xname(sc->sc_dev), manc));
   15467 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15468 		return 0;
   15469 
   15470 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15471 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15472 		factps = CSR_READ(sc, WMREG_FACTPS);
   15473 		if (((factps & FACTPS_MNGCG) == 0)
   15474 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15475 			return 1;
   15476 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   15477 		uint16_t data;
   15478 
   15479 		factps = CSR_READ(sc, WMREG_FACTPS);
   15480 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15481 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15482 			device_xname(sc->sc_dev), factps, data));
   15483 		if (((factps & FACTPS_MNGCG) == 0)
   15484 		    && ((data & NVM_CFG2_MNGM_MASK)
   15485 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15486 			return 1;
   15487 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15488 	    && ((manc & MANC_ASF_EN) == 0))
   15489 		return 1;
   15490 
   15491 	return 0;
   15492 }
   15493 
   15494 static bool
   15495 wm_phy_resetisblocked(struct wm_softc *sc)
   15496 {
   15497 	bool blocked = false;
   15498 	uint32_t reg;
   15499 	int i = 0;
   15500 
   15501 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15502 		device_xname(sc->sc_dev), __func__));
   15503 
   15504 	switch (sc->sc_type) {
   15505 	case WM_T_ICH8:
   15506 	case WM_T_ICH9:
   15507 	case WM_T_ICH10:
   15508 	case WM_T_PCH:
   15509 	case WM_T_PCH2:
   15510 	case WM_T_PCH_LPT:
   15511 	case WM_T_PCH_SPT:
   15512 	case WM_T_PCH_CNP:
   15513 		do {
   15514 			reg = CSR_READ(sc, WMREG_FWSM);
   15515 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15516 				blocked = true;
   15517 				delay(10*1000);
   15518 				continue;
   15519 			}
   15520 			blocked = false;
   15521 		} while (blocked && (i++ < 30));
    15522 		return blocked;
   15524 	case WM_T_82571:
   15525 	case WM_T_82572:
   15526 	case WM_T_82573:
   15527 	case WM_T_82574:
   15528 	case WM_T_82583:
   15529 	case WM_T_80003:
   15530 		reg = CSR_READ(sc, WMREG_MANC);
    15531 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   15536 	default:
   15537 		/* No problem */
   15538 		break;
   15539 	}
   15540 
   15541 	return false;
   15542 }
   15543 
   15544 static void
   15545 wm_get_hw_control(struct wm_softc *sc)
   15546 {
   15547 	uint32_t reg;
   15548 
   15549 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15550 		device_xname(sc->sc_dev), __func__));
   15551 
   15552 	if (sc->sc_type == WM_T_82573) {
   15553 		reg = CSR_READ(sc, WMREG_SWSM);
   15554 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15555 	} else if (sc->sc_type >= WM_T_82571) {
   15556 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15557 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15558 	}
   15559 }
   15560 
   15561 static void
   15562 wm_release_hw_control(struct wm_softc *sc)
   15563 {
   15564 	uint32_t reg;
   15565 
   15566 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15567 		device_xname(sc->sc_dev), __func__));
   15568 
   15569 	if (sc->sc_type == WM_T_82573) {
   15570 		reg = CSR_READ(sc, WMREG_SWSM);
   15571 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15572 	} else if (sc->sc_type >= WM_T_82571) {
   15573 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15574 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15575 	}
   15576 }
   15577 
   15578 static void
   15579 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15580 {
   15581 	uint32_t reg;
   15582 
   15583 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15584 		device_xname(sc->sc_dev), __func__));
   15585 
   15586 	if (sc->sc_type < WM_T_PCH2)
   15587 		return;
   15588 
   15589 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15590 
   15591 	if (gate)
   15592 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15593 	else
   15594 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15595 
   15596 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15597 }
   15598 
   15599 static int
   15600 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15601 {
   15602 	uint32_t fwsm, reg;
   15603 	int rv;
   15604 
   15605 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15606 		device_xname(sc->sc_dev), __func__));
   15607 
   15608 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15609 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15610 
   15611 	/* Disable ULP */
   15612 	wm_ulp_disable(sc);
   15613 
   15614 	/* Acquire PHY semaphore */
   15615 	rv = sc->phy.acquire(sc);
   15616 	if (rv != 0) {
   15617 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15618 		device_xname(sc->sc_dev), __func__));
   15619 		return rv;
   15620 	}
   15621 
   15622 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15623 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15624 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15625 	 */
   15626 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15627 	switch (sc->sc_type) {
   15628 	case WM_T_PCH_LPT:
   15629 	case WM_T_PCH_SPT:
   15630 	case WM_T_PCH_CNP:
   15631 		if (wm_phy_is_accessible_pchlan(sc))
   15632 			break;
   15633 
   15634 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15635 		 * forcing MAC to SMBus mode first.
   15636 		 */
   15637 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15638 		reg |= CTRL_EXT_FORCE_SMBUS;
   15639 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15640 #if 0
   15641 		/* XXX Isn't this required??? */
   15642 		CSR_WRITE_FLUSH(sc);
   15643 #endif
   15644 		/* Wait 50 milliseconds for MAC to finish any retries
   15645 		 * that it might be trying to perform from previous
   15646 		 * attempts to acknowledge any phy read requests.
   15647 		 */
   15648 		delay(50 * 1000);
   15649 		/* FALLTHROUGH */
   15650 	case WM_T_PCH2:
   15651 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15652 			break;
   15653 		/* FALLTHROUGH */
   15654 	case WM_T_PCH:
    15655 		if ((sc->sc_type == WM_T_PCH) &&
    15656 		    ((fwsm & FWSM_FW_VALID) != 0))
    15657 			break;
   15658 
   15659 		if (wm_phy_resetisblocked(sc) == true) {
   15660 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   15661 			break;
   15662 		}
   15663 
   15664 		/* Toggle LANPHYPC Value bit */
   15665 		wm_toggle_lanphypc_pch_lpt(sc);
   15666 
   15667 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15668 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15669 				break;
   15670 
   15671 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15672 			 * so ensure that the MAC is also out of SMBus mode
   15673 			 */
   15674 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15675 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15676 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15677 
   15678 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15679 				break;
   15680 			rv = -1;
   15681 		}
   15682 		break;
   15683 	default:
   15684 		break;
   15685 	}
   15686 
   15687 	/* Release semaphore */
   15688 	sc->phy.release(sc);
   15689 
   15690 	if (rv == 0) {
   15691 		/* Check to see if able to reset PHY.  Print error if not */
   15692 		if (wm_phy_resetisblocked(sc)) {
   15693 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15694 			goto out;
   15695 		}
   15696 
   15697 		/* Reset the PHY before any access to it.  Doing so, ensures
   15698 		 * that the PHY is in a known good state before we read/write
   15699 		 * PHY registers.  The generic reset is sufficient here,
   15700 		 * because we haven't determined the PHY type yet.
   15701 		 */
   15702 		if (wm_reset_phy(sc) != 0)
   15703 			goto out;
   15704 
    15705 		/* On a successful reset, possibly need to wait for the PHY
    15706 		 * to quiesce to an accessible state before returning control
    15707 		 * to the calling function.  If the PHY does not quiesce,
    15708 		 * report that the reset is blocked, as that is the condition
    15709 		 * the PHY is in.
    15710 		 */
   15711 		if (wm_phy_resetisblocked(sc))
   15712 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15713 	}
   15714 
   15715 out:
   15716 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15717 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15718 		delay(10*1000);
   15719 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15720 	}
   15721 
    15722 	return rv;
   15723 }
   15724 
   15725 static void
   15726 wm_init_manageability(struct wm_softc *sc)
   15727 {
   15728 
   15729 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15730 		device_xname(sc->sc_dev), __func__));
   15731 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   15732 
   15733 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15734 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15735 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15736 
   15737 		/* Disable hardware interception of ARP */
   15738 		manc &= ~MANC_ARP_EN;
   15739 
   15740 		/* Enable receiving management packets to the host */
   15741 		if (sc->sc_type >= WM_T_82571) {
   15742 			manc |= MANC_EN_MNG2HOST;
   15743 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15744 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15745 		}
   15746 
   15747 		CSR_WRITE(sc, WMREG_MANC, manc);
   15748 	}
   15749 }
   15750 
   15751 static void
   15752 wm_release_manageability(struct wm_softc *sc)
   15753 {
   15754 
   15755 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15756 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15757 
   15758 		manc |= MANC_ARP_EN;
   15759 		if (sc->sc_type >= WM_T_82571)
   15760 			manc &= ~MANC_EN_MNG2HOST;
   15761 
   15762 		CSR_WRITE(sc, WMREG_MANC, manc);
   15763 	}
   15764 }
   15765 
   15766 static void
   15767 wm_get_wakeup(struct wm_softc *sc)
   15768 {
   15769 
   15770 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15771 	switch (sc->sc_type) {
   15772 	case WM_T_82573:
   15773 	case WM_T_82583:
   15774 		sc->sc_flags |= WM_F_HAS_AMT;
   15775 		/* FALLTHROUGH */
   15776 	case WM_T_80003:
   15777 	case WM_T_82575:
   15778 	case WM_T_82576:
   15779 	case WM_T_82580:
   15780 	case WM_T_I350:
   15781 	case WM_T_I354:
   15782 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15783 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15784 		/* FALLTHROUGH */
   15785 	case WM_T_82541:
   15786 	case WM_T_82541_2:
   15787 	case WM_T_82547:
   15788 	case WM_T_82547_2:
   15789 	case WM_T_82571:
   15790 	case WM_T_82572:
   15791 	case WM_T_82574:
   15792 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15793 		break;
   15794 	case WM_T_ICH8:
   15795 	case WM_T_ICH9:
   15796 	case WM_T_ICH10:
   15797 	case WM_T_PCH:
   15798 	case WM_T_PCH2:
   15799 	case WM_T_PCH_LPT:
   15800 	case WM_T_PCH_SPT:
   15801 	case WM_T_PCH_CNP:
   15802 		sc->sc_flags |= WM_F_HAS_AMT;
   15803 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15804 		break;
   15805 	default:
   15806 		break;
   15807 	}
   15808 
   15809 	/* 1: HAS_MANAGE */
   15810 	if (wm_enable_mng_pass_thru(sc) != 0)
   15811 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15812 
    15813 	/*
    15814 	 * Note that the WOL flags are set after the resetting of the
    15815 	 * EEPROM stuff.
    15816 	 */
   15817 }
   15818 
   15819 /*
   15820  * Unconfigure Ultra Low Power mode.
   15821  * Only for I217 and newer (see below).
   15822  */
   15823 static int
   15824 wm_ulp_disable(struct wm_softc *sc)
   15825 {
   15826 	uint32_t reg;
   15827 	uint16_t phyreg;
   15828 	int i = 0, rv;
   15829 
   15830 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15831 		device_xname(sc->sc_dev), __func__));
   15832 	/* Exclude old devices */
   15833 	if ((sc->sc_type < WM_T_PCH_LPT)
   15834 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15835 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15836 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15837 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15838 		return 0;
   15839 
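	/*
	 * If ME firmware is present it owns the ULP configuration: ask it
	 * via H2ME to undo ULP and poll FWSM for completion.  Otherwise the
	 * host must unwind the forced-SMBus and ULP bits in the PHY by hand
	 * below.
	 */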
   15840 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15841 		/* Request ME un-configure ULP mode in the PHY */
   15842 		reg = CSR_READ(sc, WMREG_H2ME);
   15843 		reg &= ~H2ME_ULP;
   15844 		reg |= H2ME_ENFORCE_SETTINGS;
   15845 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15846 
   15847 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15848 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15849 			if (i++ == 30) {
   15850 				device_printf(sc->sc_dev, "%s timed out\n",
   15851 				    __func__);
   15852 				return -1;
   15853 			}
   15854 			delay(10 * 1000);
   15855 		}
   15856 		reg = CSR_READ(sc, WMREG_H2ME);
   15857 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15858 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15859 
   15860 		return 0;
   15861 	}
   15862 
   15863 	/* Acquire semaphore */
   15864 	rv = sc->phy.acquire(sc);
   15865 	if (rv != 0) {
   15866 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15867 		device_xname(sc->sc_dev), __func__));
   15868 		return rv;
   15869 	}
   15870 
   15871 	/* Toggle LANPHYPC */
   15872 	wm_toggle_lanphypc_pch_lpt(sc);
   15873 
   15874 	/* Unforce SMBus mode in PHY */
   15875 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15876 	if (rv != 0) {
   15877 		uint32_t reg2;
   15878 
   15879 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15880 			__func__);
   15881 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15882 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15883 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15884 		delay(50 * 1000);
   15885 
   15886 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15887 		    &phyreg);
   15888 		if (rv != 0)
   15889 			goto release;
   15890 	}
   15891 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15892 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15893 
   15894 	/* Unforce SMBus mode in MAC */
   15895 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15896 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15897 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15898 
   15899 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15900 	if (rv != 0)
   15901 		goto release;
   15902 	phyreg |= HV_PM_CTRL_K1_ENA;
   15903 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15904 
   15905 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15906 		&phyreg);
   15907 	if (rv != 0)
   15908 		goto release;
   15909 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15910 	    | I218_ULP_CONFIG1_STICKY_ULP
   15911 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15912 	    | I218_ULP_CONFIG1_WOL_HOST
   15913 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15914 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15915 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15916 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15917 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15918 	phyreg |= I218_ULP_CONFIG1_START;
   15919 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15920 
   15921 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15922 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15923 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15924 
   15925 release:
   15926 	/* Release semaphore */
   15927 	sc->phy.release(sc);
   15928 	wm_gmii_reset(sc);
   15929 	delay(50 * 1000);
   15930 
   15931 	return rv;
   15932 }
   15933 
   15934 /* WOL in the newer chipset interfaces (pchlan) */
   15935 static int
   15936 wm_enable_phy_wakeup(struct wm_softc *sc)
   15937 {
   15938 	device_t dev = sc->sc_dev;
   15939 	uint32_t mreg, moff;
   15940 	uint16_t wuce, wuc, wufc, preg;
   15941 	int i, rv;
   15942 
   15943 	KASSERT(sc->sc_type >= WM_T_PCH);
   15944 
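	/*
	 * With PHY-based wakeup it is the PHY, not the MAC, that filters
	 * incoming packets while the system sleeps, so the MAC's receive
	 * address registers, multicast table and receive control settings
	 * are mirrored into the BM_WUC-page PHY registers below.
	 */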
   15945 	/* Copy MAC RARs to PHY RARs */
   15946 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15947 
   15948 	/* Activate PHY wakeup */
   15949 	rv = sc->phy.acquire(sc);
   15950 	if (rv != 0) {
   15951 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15952 		    __func__);
   15953 		return rv;
   15954 	}
   15955 
   15956 	/*
   15957 	 * Enable access to PHY wakeup registers.
   15958 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15959 	 */
   15960 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15961 	if (rv != 0) {
   15962 		device_printf(dev,
   15963 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15964 		goto release;
   15965 	}
   15966 
   15967 	/* Copy MAC MTA to PHY MTA */
   15968 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15969 		uint16_t lo, hi;
   15970 
   15971 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15972 		lo = (uint16_t)(mreg & 0xffff);
   15973 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15974 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15975 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15976 	}
   15977 
   15978 	/* Configure PHY Rx Control register */
   15979 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15980 	mreg = CSR_READ(sc, WMREG_RCTL);
   15981 	if (mreg & RCTL_UPE)
   15982 		preg |= BM_RCTL_UPE;
   15983 	if (mreg & RCTL_MPE)
   15984 		preg |= BM_RCTL_MPE;
   15985 	preg &= ~(BM_RCTL_MO_MASK);
   15986 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15987 	if (moff != 0)
   15988 		preg |= moff << BM_RCTL_MO_SHIFT;
   15989 	if (mreg & RCTL_BAM)
   15990 		preg |= BM_RCTL_BAM;
   15991 	if (mreg & RCTL_PMCF)
   15992 		preg |= BM_RCTL_PMCF;
   15993 	mreg = CSR_READ(sc, WMREG_CTRL);
   15994 	if (mreg & CTRL_RFCE)
   15995 		preg |= BM_RCTL_RFCE;
   15996 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15997 
   15998 	wuc = WUC_APME | WUC_PME_EN;
   15999 	wufc = WUFC_MAG;
   16000 	/* Enable PHY wakeup in MAC register */
   16001 	CSR_WRITE(sc, WMREG_WUC,
   16002 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   16003 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   16004 
   16005 	/* Configure and enable PHY wakeup in PHY registers */
   16006 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   16007 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   16008 
   16009 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   16010 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16011 
   16012 release:
   16013 	sc->phy.release(sc);
   16014 
   16015 	return rv;
   16016 }
   16017 
   16018 /* Power down workaround on D3 */
   16019 static void
   16020 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16021 {
   16022 	uint32_t reg;
   16023 	uint16_t phyreg;
   16024 	int i;
   16025 
   16026 	for (i = 0; i < 2; i++) {
   16027 		/* Disable link */
   16028 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16029 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16030 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16031 
   16032 		/*
   16033 		 * Call gig speed drop workaround on Gig disable before
   16034 		 * accessing any PHY registers
   16035 		 */
   16036 		if (sc->sc_type == WM_T_ICH8)
   16037 			wm_gig_downshift_workaround_ich8lan(sc);
   16038 
   16039 		/* Write VR power-down enable */
   16040 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16041 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16042 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16043 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16044 
   16045 		/* Read it back and test */
   16046 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16047 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16048 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16049 			break;
   16050 
   16051 		/* Issue PHY reset and repeat at most one more time */
   16052 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16053 	}
   16054 }
   16055 
   16056 /*
   16057  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16058  *  @sc: pointer to the HW structure
   16059  *
   16060  *  During S0 to Sx transition, it is possible the link remains at gig
   16061  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16062  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16063  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16064  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16065  *  needs to be written.
   16066  *  Parts that support (and are linked to a partner which supports) EEE in
   16067  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16068  *  than 10Mbps w/o EEE.
   16069  */
   16070 static void
   16071 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16072 {
   16073 	device_t dev = sc->sc_dev;
   16074 	struct ethercom *ec = &sc->sc_ethercom;
   16075 	uint32_t phy_ctrl;
   16076 	int rv;
   16077 
   16078 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16079 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16080 
   16081 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16082 
   16083 	if (sc->sc_phytype == WMPHY_I217) {
   16084 		uint16_t devid = sc->sc_pcidevid;
   16085 
   16086 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16087 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16088 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16089 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16090 		    (sc->sc_type >= WM_T_PCH_SPT))
   16091 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16092 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16093 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16094 
   16095 		if (sc->phy.acquire(sc) != 0)
   16096 			goto out;
   16097 
   16098 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16099 			uint16_t eee_advert;
   16100 
   16101 			rv = wm_read_emi_reg_locked(dev,
   16102 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16103 			if (rv)
   16104 				goto release;
   16105 
   16106 			/*
   16107 			 * Disable LPLU if both link partners support 100BaseT
   16108 			 * EEE and 100Full is advertised on both ends of the
   16109 			 * link, and enable Auto Enable LPI since there will
   16110 			 * be no driver to enable LPI while in Sx.
   16111 			 */
   16112 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16113 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16114 				uint16_t anar, phy_reg;
   16115 
   16116 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16117 				    &anar);
   16118 				if (anar & ANAR_TX_FD) {
   16119 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16120 					    PHY_CTRL_NOND0A_LPLU);
   16121 
   16122 					/* Set Auto Enable LPI after link up */
   16123 					sc->phy.readreg_locked(dev, 2,
   16124 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16125 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16126 					sc->phy.writereg_locked(dev, 2,
   16127 					    I217_LPI_GPIO_CTRL, phy_reg);
   16128 				}
   16129 			}
   16130 		}
   16131 
   16132 		/*
   16133 		 * For i217 Intel Rapid Start Technology support,
   16134 		 * when the system is going into Sx and no manageability engine
   16135 		 * is present, the driver must configure proxy to reset only on
   16136 		 * power good.	LPI (Low Power Idle) state must also reset only
   16137 		 * on power good, as well as the MTA (Multicast table array).
   16138 		 * The SMBus release must also be disabled on LCD reset.
   16139 		 */
   16140 
   16141 		/*
   16142 		 * Enable MTA to reset for Intel Rapid Start Technology
   16143 		 * Support
   16144 		 */
   16145 
   16146 release:
   16147 		sc->phy.release(sc);
   16148 	}
   16149 out:
   16150 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16151 
   16152 	if (sc->sc_type == WM_T_ICH8)
   16153 		wm_gig_downshift_workaround_ich8lan(sc);
   16154 
   16155 	if (sc->sc_type >= WM_T_PCH) {
   16156 		wm_oem_bits_config_ich8lan(sc, false);
   16157 
   16158 		/* Reset PHY to activate OEM bits on 82577/8 */
   16159 		if (sc->sc_type == WM_T_PCH)
   16160 			wm_reset_phy(sc);
   16161 
   16162 		if (sc->phy.acquire(sc) != 0)
   16163 			return;
   16164 		wm_write_smbus_addr(sc);
   16165 		sc->phy.release(sc);
   16166 	}
   16167 }
   16168 
   16169 /*
   16170  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16171  *  @sc: pointer to the HW structure
   16172  *
   16173  *  During Sx to S0 transitions on non-managed devices or managed devices
   16174  *  on which PHY resets are not blocked, if the PHY registers cannot be
   16175  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   16176  *  the PHY.
   16177  *  On i217, setup Intel Rapid Start Technology.
   16178  */
   16179 static int
   16180 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16181 {
   16182 	device_t dev = sc->sc_dev;
   16183 	int rv;
   16184 
   16185 	if (sc->sc_type < WM_T_PCH2)
   16186 		return 0;
   16187 
   16188 	rv = wm_init_phy_workarounds_pchlan(sc);
   16189 	if (rv != 0)
   16190 		return rv;
   16191 
   16192 	/* For i217 Intel Rapid Start Technology support when the system
   16193 	 * is transitioning from Sx and no manageability engine is present,
   16194 	 * configure SMBus to restore on reset, disable proxy, and enable
   16195 	 * the reset on MTA (Multicast table array).
   16196 	 */
   16197 	if (sc->sc_phytype == WMPHY_I217) {
   16198 		uint16_t phy_reg;
   16199 
   16200 		rv = sc->phy.acquire(sc);
   16201 		if (rv != 0)
   16202 			return rv;
   16203 
   16204 		/* Clear Auto Enable LPI after link up */
   16205 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16206 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16207 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16208 
   16209 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16210 			/* Restore clear on SMB if no manageability engine
   16211 			 * is present
   16212 			 */
   16213 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16214 			    &phy_reg);
   16215 			if (rv != 0)
   16216 				goto release;
   16217 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16218 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16219 
   16220 			/* Disable Proxy */
   16221 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16222 		}
   16223 		/* Enable reset on MTA */
   16224 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16225 		if (rv != 0)
   16226 			goto release;
   16227 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16228 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16229 
   16230 release:
   16231 		sc->phy.release(sc);
   16232 		return rv;
   16233 	}
   16234 
   16235 	return 0;
   16236 }
   16237 
   16238 static void
   16239 wm_enable_wakeup(struct wm_softc *sc)
   16240 {
   16241 	uint32_t reg, pmreg;
   16242 	pcireg_t pmode;
   16243 	int rv = 0;
   16244 
   16245 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16246 		device_xname(sc->sc_dev), __func__));
   16247 
   16248 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16249 	    &pmreg, NULL) == 0)
   16250 		return;
   16251 
   16252 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16253 		goto pme;
   16254 
   16255 	/* Advertise the wakeup capability */
   16256 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16257 	    | CTRL_SWDPIN(3));
   16258 
   16259 	/* Keep the laser running on fiber adapters */
   16260 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16261 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16262 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16263 		reg |= CTRL_EXT_SWDPIN(3);
   16264 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16265 	}
   16266 
   16267 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16268 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16269 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16270 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16271 		wm_suspend_workarounds_ich8lan(sc);
   16272 
   16273 #if 0	/* For the multicast packet */
   16274 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16275 	reg |= WUFC_MC;
   16276 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16277 #endif
   16278 
   16279 	if (sc->sc_type >= WM_T_PCH) {
   16280 		rv = wm_enable_phy_wakeup(sc);
   16281 		if (rv != 0)
   16282 			goto pme;
   16283 	} else {
   16284 		/* Enable wakeup by the MAC */
   16285 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16286 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16287 	}
   16288 
   16289 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16290 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16291 		|| (sc->sc_type == WM_T_PCH2))
   16292 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16293 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16294 
   16295 pme:
   16296 	/* Request PME */
   16297 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16298 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16299 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16300 		/* For WOL */
   16301 		pmode |= PCI_PMCSR_PME_EN;
   16302 	} else {
   16303 		/* Disable WOL */
   16304 		pmode &= ~PCI_PMCSR_PME_EN;
   16305 	}
   16306 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16307 }
   16308 
   16309 /* Disable ASPM L0s and/or L1 for workaround */
   16310 static void
   16311 wm_disable_aspm(struct wm_softc *sc)
   16312 {
   16313 	pcireg_t reg, mask = 0;
   16314 	const char *str = "";
   16315 
   16316 	/*
   16317 	 * Only for PCIe devices which have the PCIe capability in the PCI
   16318 	 * config space.
   16319 	 */
   16320 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16321 		return;
   16322 
   16323 	switch (sc->sc_type) {
   16324 	case WM_T_82571:
   16325 	case WM_T_82572:
   16326 		/*
   16327 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16328 		 * State Power management L1 State (ASPM L1).
   16329 		 */
   16330 		mask = PCIE_LCSR_ASPM_L1;
   16331 		str = "L1 is";
   16332 		break;
   16333 	case WM_T_82573:
   16334 	case WM_T_82574:
   16335 	case WM_T_82583:
   16336 		/*
   16337 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16338 		 *
   16339 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
   16340 		 * some chipsets.  Their specification updates say that
   16341 		 * disabling L0s on those specific chipsets is sufficient,
   16342 		 * but we follow what the Intel em driver does.
   16343 		 *
   16344 		 * References:
   16345 		 * Errata 8 of the Specification Update of i82573.
   16346 		 * Errata 20 of the Specification Update of i82574.
   16347 		 * Errata 9 of the Specification Update of i82583.
   16348 		 */
   16349 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16350 		str = "L0s and L1 are";
   16351 		break;
   16352 	default:
   16353 		return;
   16354 	}
   16355 
   16356 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16357 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16358 	reg &= ~mask;
   16359 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16360 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16361 
   16362 	/* Print only in wm_attach() */
   16363 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16364 		aprint_verbose_dev(sc->sc_dev,
   16365 		    "ASPM %s disabled to workaround the errata.\n", str);
   16366 }
   16367 
   16368 /* LPLU */
   16369 
   16370 static void
   16371 wm_lplu_d0_disable(struct wm_softc *sc)
   16372 {
   16373 	struct mii_data *mii = &sc->sc_mii;
   16374 	uint32_t reg;
   16375 	uint16_t phyval;
   16376 
   16377 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16378 		device_xname(sc->sc_dev), __func__));
   16379 
   16380 	if (sc->sc_phytype == WMPHY_IFE)
   16381 		return;
   16382 
   16383 	switch (sc->sc_type) {
   16384 	case WM_T_82571:
   16385 	case WM_T_82572:
   16386 	case WM_T_82573:
   16387 	case WM_T_82575:
   16388 	case WM_T_82576:
   16389 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16390 		phyval &= ~PMR_D0_LPLU;
   16391 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16392 		break;
   16393 	case WM_T_82580:
   16394 	case WM_T_I350:
   16395 	case WM_T_I210:
   16396 	case WM_T_I211:
   16397 		reg = CSR_READ(sc, WMREG_PHPM);
   16398 		reg &= ~PHPM_D0A_LPLU;
   16399 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16400 		break;
   16401 	case WM_T_82574:
   16402 	case WM_T_82583:
   16403 	case WM_T_ICH8:
   16404 	case WM_T_ICH9:
   16405 	case WM_T_ICH10:
   16406 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16407 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16408 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16409 		CSR_WRITE_FLUSH(sc);
   16410 		break;
   16411 	case WM_T_PCH:
   16412 	case WM_T_PCH2:
   16413 	case WM_T_PCH_LPT:
   16414 	case WM_T_PCH_SPT:
   16415 	case WM_T_PCH_CNP:
   16416 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16417 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16418 		if (wm_phy_resetisblocked(sc) == false)
   16419 			phyval |= HV_OEM_BITS_ANEGNOW;
   16420 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16421 		break;
   16422 	default:
   16423 		break;
   16424 	}
   16425 }
   16426 
   16427 /* EEE */
   16428 
   16429 static int
   16430 wm_set_eee_i350(struct wm_softc *sc)
   16431 {
   16432 	struct ethercom *ec = &sc->sc_ethercom;
   16433 	uint32_t ipcnfg, eeer;
   16434 	uint32_t ipcnfg_mask
   16435 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16436 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16437 
   16438 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16439 
   16440 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16441 	eeer = CSR_READ(sc, WMREG_EEER);
   16442 
   16443 	/* Enable or disable per user setting */
   16444 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16445 		ipcnfg |= ipcnfg_mask;
   16446 		eeer |= eeer_mask;
   16447 	} else {
   16448 		ipcnfg &= ~ipcnfg_mask;
   16449 		eeer &= ~eeer_mask;
   16450 	}
   16451 
   16452 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16453 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16454 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16455 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16456 
   16457 	return 0;
   16458 }
   16459 
   16460 static int
   16461 wm_set_eee_pchlan(struct wm_softc *sc)
   16462 {
   16463 	device_t dev = sc->sc_dev;
   16464 	struct ethercom *ec = &sc->sc_ethercom;
   16465 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16466 	int rv;
   16467 
   16468 	switch (sc->sc_phytype) {
   16469 	case WMPHY_82579:
   16470 		lpa = I82579_EEE_LP_ABILITY;
   16471 		pcs_status = I82579_EEE_PCS_STATUS;
   16472 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16473 		break;
   16474 	case WMPHY_I217:
   16475 		lpa = I217_EEE_LP_ABILITY;
   16476 		pcs_status = I217_EEE_PCS_STATUS;
   16477 		adv_addr = I217_EEE_ADVERTISEMENT;
   16478 		break;
   16479 	default:
   16480 		return 0;
   16481 	}
   16482 
   16483 	rv = sc->phy.acquire(sc);
   16484 	if (rv != 0) {
   16485 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16486 		return rv;
   16487 	}
   16488 
   16489 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16490 	if (rv != 0)
   16491 		goto release;
   16492 
   16493 	/* Clear bits that enable EEE in various speeds */
   16494 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16495 
   16496 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16497 		/* Save off link partner's EEE ability */
   16498 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16499 		if (rv != 0)
   16500 			goto release;
   16501 
   16502 		/* Read EEE advertisement */
   16503 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16504 			goto release;
   16505 
   16506 		/*
   16507 		 * Enable EEE only for speeds in which the link partner is
   16508 		 * EEE capable and for which we advertise EEE.
   16509 		 */
   16510 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16511 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16512 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16513 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16514 			if ((data & ANLPAR_TX_FD) != 0)
   16515 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16516 			else {
   16517 				/*
   16518 				 * EEE is not supported in 100Half, so ignore
   16519 				 * partner's EEE in 100 ability if full-duplex
   16520 				 * is not advertised.
   16521 				 */
   16522 				sc->eee_lp_ability
   16523 				    &= ~AN_EEEADVERT_100_TX;
   16524 			}
   16525 		}
   16526 	}
   16527 
   16528 	if (sc->sc_phytype == WMPHY_82579) {
   16529 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16530 		if (rv != 0)
   16531 			goto release;
   16532 
   16533 		data &= ~I82579_LPI_PLL_SHUT_100;
   16534 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16535 	}
   16536 
   16537 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16538 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16539 		goto release;
   16540 
   16541 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16542 release:
   16543 	sc->phy.release(sc);
   16544 
   16545 	return rv;
   16546 }
   16547 
   16548 static int
   16549 wm_set_eee(struct wm_softc *sc)
   16550 {
   16551 	struct ethercom *ec = &sc->sc_ethercom;
   16552 
   16553 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16554 		return 0;
   16555 
   16556 	if (sc->sc_type == WM_T_I354) {
   16557 		/* I354 uses an external PHY */
   16558 		return 0; /* not yet */
   16559 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16560 		return wm_set_eee_i350(sc);
   16561 	else if (sc->sc_type >= WM_T_PCH2)
   16562 		return wm_set_eee_pchlan(sc);
   16563 
   16564 	return 0;
   16565 }
   16566 
   16567 /*
   16568  * Workarounds (mainly PHY related).
   16569  * Basically, PHY's workarounds are in the PHY drivers.
   16570  */
   16571 
   16572 /* Workaround for 82566 Kumeran PCS lock loss */
   16573 static int
   16574 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16575 {
   16576 	struct mii_data *mii = &sc->sc_mii;
   16577 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16578 	int i, reg, rv;
   16579 	uint16_t phyreg;
   16580 
   16581 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16582 		device_xname(sc->sc_dev), __func__));
   16583 
   16584 	/* If the link is not up, do nothing */
   16585 	if ((status & STATUS_LU) == 0)
   16586 		return 0;
   16587 
   16588 	/* Nothing to do if the link is other than 1Gbps */
   16589 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16590 		return 0;
   16591 
   16592 	for (i = 0; i < 10; i++) {
   16593 		/* read twice */
   16594 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16595 		if (rv != 0)
   16596 			return rv;
   16597 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16598 		if (rv != 0)
   16599 			return rv;
   16600 
   16601 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16602 			goto out;	/* GOOD! */
   16603 
   16604 		/* Reset the PHY */
   16605 		wm_reset_phy(sc);
   16606 		delay(5*1000);
   16607 	}
   16608 
   16609 	/* Disable GigE link negotiation */
   16610 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16611 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16612 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16613 
   16614 	/*
   16615 	 * Call gig speed drop workaround on Gig disable before accessing
   16616 	 * any PHY registers.
   16617 	 */
   16618 	wm_gig_downshift_workaround_ich8lan(sc);
   16619 
   16620 out:
   16621 	return 0;
   16622 }
   16623 
   16624 /*
   16625  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16626  *  @sc: pointer to the HW structure
   16627  *
   16628  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16629  *  LPLU, Gig disable, MDIC PHY reset):
   16630  *    1) Set Kumeran Near-end loopback
   16631  *    2) Clear Kumeran Near-end loopback
   16632  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16633  */
   16634 static void
   16635 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16636 {
   16637 	uint16_t kmreg;
   16638 
   16639 	/* Only for igp3 */
   16640 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16641 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16642 			return;
   16643 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16644 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16645 			return;
   16646 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16647 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16648 	}
   16649 }
   16650 
   16651 /*
   16652  * Workaround for pch's PHYs
   16653  * XXX should be moved to new PHY driver?
   16654  */
   16655 static int
   16656 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16657 {
   16658 	device_t dev = sc->sc_dev;
   16659 	struct mii_data *mii = &sc->sc_mii;
   16660 	struct mii_softc *child;
   16661 	uint16_t phy_data, phyrev = 0;
   16662 	int phytype = sc->sc_phytype;
   16663 	int rv;
   16664 
   16665 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16666 		device_xname(dev), __func__));
   16667 	KASSERT(sc->sc_type == WM_T_PCH);
   16668 
   16669 	/* Set MDIO slow mode before any other MDIO access */
   16670 	if (phytype == WMPHY_82577)
   16671 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16672 			return rv;
   16673 
   16674 	child = LIST_FIRST(&mii->mii_phys);
   16675 	if (child != NULL)
   16676 		phyrev = child->mii_mpd_rev;
   16677 
   16678 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16679 	if ((child != NULL) &&
   16680 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16681 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16682 		/* Disable generation of early preamble (0x4431) */
   16683 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16684 		    &phy_data);
   16685 		if (rv != 0)
   16686 			return rv;
   16687 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16688 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16689 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16690 		    phy_data);
   16691 		if (rv != 0)
   16692 			return rv;
   16693 
   16694 		/* Preamble tuning for SSC */
   16695 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16696 		if (rv != 0)
   16697 			return rv;
   16698 	}
   16699 
   16700 	/* 82578 */
   16701 	if (phytype == WMPHY_82578) {
   16702 		/*
   16703 		 * Return registers to default by doing a soft reset then
   16704 		 * writing 0x3140 to the control register
   16705 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16706 		 */
   16707 		if ((child != NULL) && (phyrev < 2)) {
   16708 			PHY_RESET(child);
   16709 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16710 			if (rv != 0)
   16711 				return rv;
   16712 		}
   16713 	}
   16714 
   16715 	/* Select page 0 */
   16716 	if ((rv = sc->phy.acquire(sc)) != 0)
   16717 		return rv;
   16718 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16719 	sc->phy.release(sc);
   16720 	if (rv != 0)
   16721 		return rv;
   16722 
   16723 	/*
   16724 	 * Configure the K1 Si workaround during phy reset assuming there is
   16725 	 * link so that it disables K1 if link is in 1Gbps.
   16726 	 */
   16727 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16728 		return rv;
   16729 
   16730 	/* Workaround for link disconnects on a busy hub in half duplex */
   16731 	rv = sc->phy.acquire(sc);
   16732 	if (rv)
   16733 		return rv;
   16734 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16735 	if (rv)
   16736 		goto release;
   16737 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16738 	    phy_data & 0x00ff);
   16739 	if (rv)
   16740 		goto release;
   16741 
   16742 	/* Set MSE higher to enable link to stay up when noise is high */
   16743 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16744 release:
   16745 	sc->phy.release(sc);
   16746 
   16747 	return rv;
   16748 }
   16749 
   16750 /*
   16751  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16752  *  @sc:   pointer to the HW structure
   16753  */
   16754 static void
   16755 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16756 {
   16757 
   16758 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16759 		device_xname(sc->sc_dev), __func__));
   16760 
   16761 	if (sc->phy.acquire(sc) != 0)
   16762 		return;
   16763 
   16764 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16765 
   16766 	sc->phy.release(sc);
   16767 }
   16768 
   16769 static void
   16770 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16771 {
   16772 	device_t dev = sc->sc_dev;
   16773 	uint32_t mac_reg;
   16774 	uint16_t i, wuce;
   16775 	int count;
   16776 
   16777 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16778 		device_xname(dev), __func__));
   16779 
   16780 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16781 		return;
   16782 
   16783 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16784 	count = wm_rar_count(sc);
   16785 	for (i = 0; i < count; i++) {
   16786 		uint16_t lo, hi;
   16787 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16788 		lo = (uint16_t)(mac_reg & 0xffff);
   16789 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16790 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16791 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16792 
   16793 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16794 		lo = (uint16_t)(mac_reg & 0xffff);
   16795 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16796 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16797 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16798 	}
   16799 
   16800 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16801 }
   16802 
   16803 /*
   16804  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16805  *  with 82579 PHY
   16806  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16807  */
   16808 static int
   16809 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16810 {
   16811 	device_t dev = sc->sc_dev;
   16812 	int rar_count;
   16813 	int rv;
   16814 	uint32_t mac_reg;
   16815 	uint16_t dft_ctrl, data;
   16816 	uint16_t i;
   16817 
   16818 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16819 		device_xname(dev), __func__));
   16820 
   16821 	if (sc->sc_type < WM_T_PCH2)
   16822 		return 0;
   16823 
   16824 	/* Acquire PHY semaphore */
   16825 	rv = sc->phy.acquire(sc);
   16826 	if (rv != 0)
   16827 		return rv;
   16828 
   16829 	/* Disable Rx path while enabling/disabling workaround */
   16830 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16831 	if (rv != 0)
   16832 		goto out;
   16833 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16834 	    dft_ctrl | (1 << 14));
   16835 	if (rv != 0)
   16836 		goto out;
   16837 
   16838 	if (enable) {
   16839 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16840 		 * SHRAL/H) and initial CRC values to the MAC
   16841 		 */
   16842 		rar_count = wm_rar_count(sc);
   16843 		for (i = 0; i < rar_count; i++) {
   16844 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16845 			uint32_t addr_high, addr_low;
   16846 
   16847 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16848 			if (!(addr_high & RAL_AV))
   16849 				continue;
   16850 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16851 			mac_addr[0] = (addr_low & 0xFF);
   16852 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16853 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16854 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16855 			mac_addr[4] = (addr_high & 0xFF);
   16856 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16857 
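         			/*
         			 * The initial CRC value written to RAICC is the
         			 * bitwise complement of the little-endian
         			 * Ethernet CRC-32 over the 6-byte station
         			 * address.
         			 */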
   16858 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16859 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16860 		}
   16861 
   16862 		/* Write Rx addresses to the PHY */
   16863 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16864 	}
   16865 
   16866 	/*
   16867 	 * If enable ==
   16868 	 *	true: Enable jumbo frame workaround in the MAC.
   16869 	 *	false: Write MAC register values back to h/w defaults.
   16870 	 */
   16871 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16872 	if (enable) {
   16873 		mac_reg &= ~(1 << 14);
   16874 		mac_reg |= (7 << 15);
   16875 	} else
   16876 		mac_reg &= ~(0xf << 14);
   16877 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16878 
   16879 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16880 	if (enable) {
   16881 		mac_reg |= RCTL_SECRC;
   16882 		sc->sc_rctl |= RCTL_SECRC;
   16883 		sc->sc_flags |= WM_F_CRC_STRIP;
   16884 	} else {
   16885 		mac_reg &= ~RCTL_SECRC;
   16886 		sc->sc_rctl &= ~RCTL_SECRC;
   16887 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16888 	}
   16889 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16890 
   16891 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16892 	if (rv != 0)
   16893 		goto out;
   16894 	if (enable)
   16895 		data |= 1 << 0;
   16896 	else
   16897 		data &= ~(1 << 0);
   16898 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16899 	if (rv != 0)
   16900 		goto out;
   16901 
   16902 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16903 	if (rv != 0)
   16904 		goto out;
   16905 	/*
   16906 	 * XXX FreeBSD and Linux do the same thing here: they set the same
   16907 	 * value in both the enable case and the disable case. Is that correct?
   16908 	 */
   16909 	data &= ~(0xf << 8);
   16910 	data |= (0xb << 8);
   16911 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16912 	if (rv != 0)
   16913 		goto out;
   16914 
   16915 	/*
   16916 	 * If enable ==
   16917 	 *	true: Enable jumbo frame workaround in the PHY.
   16918 	 *	false: Write PHY register values back to h/w defaults.
   16919 	 */
   16920 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16921 	if (rv != 0)
   16922 		goto out;
   16923 	data &= ~(0x7F << 5);
   16924 	if (enable)
   16925 		data |= (0x37 << 5);
   16926 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16927 	if (rv != 0)
   16928 		goto out;
   16929 
   16930 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16931 	if (rv != 0)
   16932 		goto out;
   16933 	if (enable)
   16934 		data &= ~(1 << 13);
   16935 	else
   16936 		data |= (1 << 13);
   16937 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16938 	if (rv != 0)
   16939 		goto out;
   16940 
   16941 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16942 	if (rv != 0)
   16943 		goto out;
   16944 	data &= ~(0x3FF << 2);
   16945 	if (enable)
   16946 		data |= (I82579_TX_PTR_GAP << 2);
   16947 	else
   16948 		data |= (0x8 << 2);
   16949 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16950 	if (rv != 0)
   16951 		goto out;
   16952 
   16953 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16954 	    enable ? 0xf100 : 0x7e00);
   16955 	if (rv != 0)
   16956 		goto out;
   16957 
   16958 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16959 	if (rv != 0)
   16960 		goto out;
   16961 	if (enable)
   16962 		data |= 1 << 10;
   16963 	else
   16964 		data &= ~(1 << 10);
   16965 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16966 	if (rv != 0)
   16967 		goto out;
   16968 
   16969 	/* Re-enable Rx path after enabling/disabling workaround */
   16970 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16971 	    dft_ctrl & ~(1 << 14));
   16972 
   16973 out:
   16974 	sc->phy.release(sc);
   16975 
   16976 	return rv;
   16977 }
   16978 
   16979 /*
   16980  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16981  *  done after every PHY reset.
   16982  */
   16983 static int
   16984 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16985 {
   16986 	device_t dev = sc->sc_dev;
   16987 	int rv;
   16988 
   16989 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16990 		device_xname(dev), __func__));
   16991 	KASSERT(sc->sc_type == WM_T_PCH2);
   16992 
   16993 	/* Set MDIO slow mode before any other MDIO access */
   16994 	rv = wm_set_mdio_slow_mode_hv(sc);
   16995 	if (rv != 0)
   16996 		return rv;
   16997 
   16998 	rv = sc->phy.acquire(sc);
   16999 	if (rv != 0)
   17000 		return rv;
   17001 	/* Set MSE higher to enable link to stay up when noise is high */
   17002 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   17003 	if (rv != 0)
   17004 		goto release;
   17005 	/* Drop the link after the MSE threshold has been reached 5 times */
   17006 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   17007 release:
   17008 	sc->phy.release(sc);
   17009 
   17010 	return rv;
   17011 }
   17012 
   17013 /**
   17014  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   17015  *  @link: link up bool flag
   17016  *
   17017  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   17018  *  preventing further DMA write requests.  Workaround the issue by disabling
   17019  *  the de-assertion of the clock request when in 1Gbps mode.
   17020  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   17021  *  speeds in order to avoid Tx hangs.
   17022  **/
   17023 static int
   17024 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17025 {
   17026 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17027 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17028 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17029 	uint16_t phyreg;
   17030 
   17031 	if (link && (speed == STATUS_SPEED_1000)) {
   17032 		int rv;
   17033 
   17034 		rv = sc->phy.acquire(sc);
   17035 		if (rv != 0)
   17036 			return rv;
   17037 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17038 		    &phyreg);
   17039 		if (rv != 0)
   17040 			goto release;
   17041 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17042 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17043 		if (rv != 0)
   17044 			goto release;
   17045 		delay(20);
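         		/* Keep the PLL clock request asserted at 1G (see above) */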
   17046 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17047 
   17048 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17049 		    &phyreg);
   17050 release:
   17051 		sc->phy.release(sc);
   17052 		return rv;
   17053 	}
   17054 
   17055 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17056 
   17057 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17058 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17059 	    || !link
   17060 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17061 		goto update_fextnvm6;
   17062 
   17063 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17064 
   17065 	/* Clear link status transmit timeout */
   17066 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17067 	if (speed == STATUS_SPEED_100) {
   17068 		/* Set inband Tx timeout to 5x10us for 100Half */
   17069 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17070 
   17071 		/* Do not extend the K1 entry latency for 100Half */
   17072 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17073 	} else {
   17074 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17075 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17076 
   17077 		/* Extend the K1 entry latency for 10 Mbps */
   17078 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17079 	}
   17080 
   17081 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17082 
   17083 update_fextnvm6:
   17084 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17085 	return 0;
   17086 }
   17087 
   17088 /*
   17089  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17090  *  @sc:   pointer to the HW structure
   17091  *  @link: link up bool flag
   17092  *
   17093  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   17094  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17095  *  If link is down, the function will restore the default K1 setting located
   17096  *  in the NVM.
   17097  */
   17098 static int
   17099 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17100 {
   17101 	int k1_enable = sc->sc_nvm_k1_enabled;
   17102 	int rv;
   17103 
   17104 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17105 		device_xname(sc->sc_dev), __func__));
   17106 
   17107 	rv = sc->phy.acquire(sc);
   17108 	if (rv != 0)
   17109 		return rv;
   17110 
   17111 	if (link) {
   17112 		k1_enable = 0;
   17113 
   17114 		/* Link stall fix for link up */
   17115 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17116 		    0x0100);
   17117 	} else {
   17118 		/* Link stall fix for link down */
   17119 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17120 		    0x4100);
   17121 	}
   17122 
   17123 	wm_configure_k1_ich8lan(sc, k1_enable);
   17124 	sc->phy.release(sc);
   17125 
   17126 	return 0;
   17127 }
   17128 
   17129 /*
   17130  *  wm_k1_workaround_lv - K1 Si workaround
   17131  *  @sc:   pointer to the HW structure
   17132  *
   17133  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   17134  *  Disable K1 for 1000 and 100 speeds
   17135  */
   17136 static int
   17137 wm_k1_workaround_lv(struct wm_softc *sc)
   17138 {
   17139 	uint32_t reg;
   17140 	uint16_t phyreg;
   17141 	int rv;
   17142 
   17143 	if (sc->sc_type != WM_T_PCH2)
   17144 		return 0;
   17145 
   17146 	/* Set K1 beacon duration based on 10Mbps speed */
   17147 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17148 	if (rv != 0)
   17149 		return rv;
   17150 
   17151 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17152 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17153 		if (phyreg &
   17154 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   17155 			/* LV 1G/100 packet drop issue workaround */
   17156 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17157 			    &phyreg);
   17158 			if (rv != 0)
   17159 				return rv;
   17160 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17161 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17162 			    phyreg);
   17163 			if (rv != 0)
   17164 				return rv;
   17165 		} else {
   17166 			/* For 10Mbps */
   17167 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17168 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17169 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17170 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17171 		}
   17172 	}
   17173 
   17174 	return 0;
   17175 }
   17176 
   17177 /*
   17178  *  wm_link_stall_workaround_hv - Si workaround
   17179  *  @sc: pointer to the HW structure
   17180  *
   17181  *  This function works around a Si bug where the link partner can get
   17182  *  a link up indication before the PHY does. If small packets are sent
   17183  *  by the link partner they can be placed in the packet buffer without
   17184  *  by the link partner, they can be placed in the packet buffer without
   17185  *  being properly accounted for by the PHY and will stall, preventing
   17186  *  packet buffer after the PHY detects link up.
   17187  */
   17188 static int
   17189 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17190 {
   17191 	uint16_t phyreg;
   17192 
   17193 	if (sc->sc_phytype != WMPHY_82578)
   17194 		return 0;
   17195 
   17196 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17197 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17198 	if ((phyreg & BMCR_LOOP) != 0)
   17199 		return 0;
   17200 
   17201 	/* Check if link is up and at 1Gbps */
   17202 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17203 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17204 	    | BM_CS_STATUS_SPEED_MASK;
   17205 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17206 		| BM_CS_STATUS_SPEED_1000))
   17207 		return 0;
   17208 
   17209 	delay(200 * 1000);	/* XXX too big */
   17210 
   17211 	/* Flush the packets in the fifo buffer */
   17212 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17213 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17214 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17215 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17216 
   17217 	return 0;
   17218 }
   17219 
   17220 static int
   17221 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17222 {
   17223 	int rv;
   17224 
   17225 	rv = sc->phy.acquire(sc);
   17226 	if (rv != 0) {
   17227 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17228 		    __func__);
   17229 		return rv;
   17230 	}
   17231 
   17232 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17233 
   17234 	sc->phy.release(sc);
   17235 
   17236 	return rv;
   17237 }
   17238 
   17239 static int
   17240 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17241 {
   17242 	int rv;
   17243 	uint16_t reg;
   17244 
   17245 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17246 	if (rv != 0)
   17247 		return rv;
   17248 
   17249 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17250 	    reg | HV_KMRN_MDIO_SLOW);
   17251 }
   17252 
   17253 /*
   17254  *  wm_configure_k1_ich8lan - Configure K1 power state
   17255  *  @sc: pointer to the HW structure
   17256  *  @enable: K1 state to configure
   17257  *
   17258  *  Configure the K1 power state based on the provided parameter.
   17259  *  Assumes semaphore already acquired.
   17260  */
   17261 static void
   17262 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17263 {
   17264 	uint32_t ctrl, ctrl_ext, tmp;
   17265 	uint16_t kmreg;
   17266 	int rv;
   17267 
   17268 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17269 
   17270 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17271 	if (rv != 0)
   17272 		return;
   17273 
   17274 	if (k1_enable)
   17275 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17276 	else
   17277 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17278 
   17279 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17280 	if (rv != 0)
   17281 		return;
   17282 
   17283 	delay(20);
   17284 
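         	/*
         	 * XXX The speed force/bypass dance below presumably bounces
         	 * the MAC/PHY interface so that the new K1 setting takes
         	 * effect.
         	 */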
   17285 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17286 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17287 
   17288 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17289 	tmp |= CTRL_FRCSPD;
   17290 
   17291 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17292 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17293 	CSR_WRITE_FLUSH(sc);
   17294 	delay(20);
   17295 
   17296 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17297 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17298 	CSR_WRITE_FLUSH(sc);
   17299 	delay(20);
   17300 
   17301 	return;
   17302 }
   17303 
   17304 /* special case - for 82575 - need to do manual init ... */
   17305 static void
   17306 wm_reset_init_script_82575(struct wm_softc *sc)
   17307 {
   17308 	/*
   17309 	 * Remark: this is untested code - we have no board without EEPROM.
   17310 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   17311 	 */
   17312 
   17313 	/* SerDes configuration via SERDESCTRL */
   17314 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17315 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17316 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17317 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17318 
   17319 	/* CCM configuration via CCMCTL register */
   17320 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17321 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17322 
   17323 	/* PCIe lanes configuration */
   17324 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17325 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17326 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17327 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17328 
   17329 	/* PCIe PLL Configuration */
   17330 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17331 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17332 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17333 }
   17334 
   17335 static void
   17336 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17337 {
   17338 	uint32_t reg;
   17339 	uint16_t nvmword;
   17340 	int rv;
   17341 
   17342 	if (sc->sc_type != WM_T_82580)
   17343 		return;
   17344 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17345 		return;
   17346 
   17347 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17348 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17349 	if (rv != 0) {
   17350 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17351 		    __func__);
   17352 		return;
   17353 	}
   17354 
   17355 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17356 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17357 		reg |= MDICNFG_DEST;
   17358 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17359 		reg |= MDICNFG_COM_MDIO;
   17360 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17361 }
   17362 
   17363 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17364 
   17365 static bool
   17366 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17367 {
   17368 	uint32_t reg;
   17369 	uint16_t id1, id2;
   17370 	int i, rv;
   17371 
   17372 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17373 		device_xname(sc->sc_dev), __func__));
   17374 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17375 
   17376 	id1 = id2 = 0xffff;
   17377 	for (i = 0; i < 2; i++) {
   17378 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17379 		    &id1);
   17380 		if ((rv != 0) || MII_INVALIDID(id1))
   17381 			continue;
   17382 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17383 		    &id2);
   17384 		if ((rv != 0) || MII_INVALIDID(id2))
   17385 			continue;
   17386 		break;
   17387 	}
   17388 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17389 		goto out;
   17390 
   17391 	/*
   17392 	 * In case the PHY needs to be in mdio slow mode,
   17393 	 * set slow mode and try to get the PHY id again.
   17394 	 */
   17395 	rv = 0;
   17396 	if (sc->sc_type < WM_T_PCH_LPT) {
   17397 		wm_set_mdio_slow_mode_hv_locked(sc);
   17398 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17399 		    &id1);
   17400 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17401 		    &id2);
   17402 	}
   17403 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17404 		device_printf(sc->sc_dev, "XXX return with false\n");
   17405 		return false;
   17406 	}
   17407 out:
   17408 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17409 		/* Only unforce SMBus if ME is not active */
   17410 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17411 			uint16_t phyreg;
   17412 
   17413 			/* Unforce SMBus mode in PHY */
   17414 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17415 			    CV_SMB_CTRL, &phyreg);
   17416 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17417 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17418 			    CV_SMB_CTRL, phyreg);
   17419 
   17420 			/* Unforce SMBus mode in MAC */
   17421 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17422 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17423 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17424 		}
   17425 	}
   17426 	return true;
   17427 }
   17428 
   17429 static void
   17430 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17431 {
   17432 	uint32_t reg;
   17433 	int i;
   17434 
   17435 	/* Set PHY Config Counter to 50msec */
   17436 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17437 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17438 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17439 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17440 
   17441 	/* Toggle LANPHYPC */
   17442 	reg = CSR_READ(sc, WMREG_CTRL);
   17443 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17444 	reg &= ~CTRL_LANPHYPC_VALUE;
   17445 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17446 	CSR_WRITE_FLUSH(sc);
   17447 	delay(1000);
   17448 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17449 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17450 	CSR_WRITE_FLUSH(sc);
   17451 
   17452 	if (sc->sc_type < WM_T_PCH_LPT)
   17453 		delay(50 * 1000);
   17454 	else {
   17455 		i = 20;
   17456 
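         		/* Poll up to 20 * 5ms for CTRL_EXT_LPCD to become set */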
   17457 		do {
   17458 			delay(5 * 1000);
   17459 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17460 		    && i--);
   17461 
   17462 		delay(30 * 1000);
   17463 	}
   17464 }
   17465 
   17466 static int
   17467 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17468 {
   17469 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17470 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17471 	uint32_t rxa;
   17472 	uint16_t scale = 0, lat_enc = 0;
   17473 	int32_t obff_hwm = 0;
   17474 	int64_t lat_ns, value;
   17475 
   17476 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17477 		device_xname(sc->sc_dev), __func__));
   17478 
   17479 	if (link) {
   17480 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17481 		uint32_t status;
   17482 		uint16_t speed;
   17483 		pcireg_t preg;
   17484 
   17485 		status = CSR_READ(sc, WMREG_STATUS);
   17486 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17487 		case STATUS_SPEED_10:
   17488 			speed = 10;
   17489 			break;
   17490 		case STATUS_SPEED_100:
   17491 			speed = 100;
   17492 			break;
   17493 		case STATUS_SPEED_1000:
   17494 			speed = 1000;
   17495 			break;
   17496 		default:
   17497 			device_printf(sc->sc_dev, "Unknown speed "
   17498 			    "(status = %08x)\n", status);
   17499 			return -1;
   17500 		}
   17501 
   17502 		/* Rx Packet Buffer Allocation size (KB) */
   17503 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17504 
   17505 		/*
   17506 		 * Determine the maximum latency tolerated by the device.
   17507 		 *
   17508 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17509 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17510 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17511 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17512 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17513 		 */
   17514 		lat_ns = ((int64_t)rxa * 1024 -
   17515 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17516 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17517 		if (lat_ns < 0)
   17518 			lat_ns = 0;
   17519 		else
   17520 			lat_ns /= speed;
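         		/*
         		 * Units: rxa is in KB, so rxa * 1024 is bytes of Rx
         		 * buffer; subtracting two maximum-sized frames and
         		 * multiplying by 8 gives bits, and bits * 1000 divided
         		 * by the speed in Mb/s yields nanoseconds.
         		 */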
   17521 		value = lat_ns;
   17522 
   17523 		while (value > LTRV_VALUE) {
   17524 			scale++;
   17525 			value = howmany(value, __BIT(5));
   17526 		}
   17527 		if (scale > LTRV_SCALE_MAX) {
   17528 			device_printf(sc->sc_dev,
   17529 			    "Invalid LTR latency scale %d\n", scale);
   17530 			return -1;
   17531 		}
   17532 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
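         		/*
         		 * Worked example: lat_ns = 66000 exceeds the 10-bit
         		 * value field (max 1023), so the loop above divides
         		 * (rounding up) by 2^5 twice: 66000 -> 2063 -> 65,
         		 * giving scale = 2 and lat_enc = (2 << 10) | 65, i.e.
         		 * 65 * 2^10 ns =~ 66.6 us (assuming LTRV_SCALE is bits
         		 * 12:10, as the PCI_LTR_* decoding below implies).
         		 */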
   17533 
   17534 		/* Determine the maximum latency tolerated by the platform */
   17535 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17536 		    WM_PCI_LTR_CAP_LPT);
   17537 		max_snoop = preg & 0xffff;
   17538 		max_nosnoop = preg >> 16;
   17539 
   17540 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17541 
   17542 		if (lat_enc > max_ltr_enc) {
   17543 			lat_enc = max_ltr_enc;
   17544 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17545 			    * PCI_LTR_SCALETONS(
   17546 				    __SHIFTOUT(lat_enc,
   17547 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17548 		}
   17549 
   17550 		if (lat_ns) {
   17551 			lat_ns *= speed * 1000;
   17552 			lat_ns /= 8;
   17553 			lat_ns /= 1000000000;
   17554 			obff_hwm = (int32_t)(rxa - lat_ns);
   17555 		}
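         		/*
         		 * The block above inverts the earlier formula: the
         		 * tolerated latency is converted back into (roughly)
         		 * the KB of Rx buffer consumed at line rate, and the
         		 * remainder of rxa becomes the OBFF high water mark.
         		 */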
   17556 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   17557 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   17558 			    " (rxa = %d, lat_ns = %d)\n",
   17559 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17560 			return -1;
   17561 		}
   17562 	}
   17563 	/* Snoop and No-Snoop latencies are the same */
   17564 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17565 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17566 
   17567 	/* Set OBFF high water mark */
   17568 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17569 	reg |= obff_hwm;
   17570 	CSR_WRITE(sc, WMREG_SVT, reg);
   17571 
   17572 	/* Enable OBFF */
   17573 	reg = CSR_READ(sc, WMREG_SVCR);
   17574 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17575 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17576 
   17577 	return 0;
   17578 }
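
         /*
          * Added sketch (hypothetical helper, not part of the original
          * driver): decode an LTR value back to nanoseconds, assuming the
          * layout used above -- value in bits 9:0, 3-bit scale in bits
          * 12:10, with the scale selecting 2^(5 * scale) ns units.
          */
         static inline uint64_t
         wm_ltr_enc_to_ns(uint16_t lat_enc)
         {

         	return (uint64_t)(lat_enc & 0x3ff) << (5 * ((lat_enc >> 10) & 0x7));
         }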
   17579 
   17580 /*
   17581  * I210 Errata 25 and I211 Errata 10
   17582  * Slow System Clock.
   17583  *
   17584  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
   17585  */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	int pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

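	/*
	 * Route MDIC accesses to the internal PHY while the workaround
	 * runs (inferred from the MDICNFG_DEST bit name; the destination
	 * bit selects an external MDIO device when set).
	 */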
	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
		/*
		 * The default value of the Initialization Control Word 1
		 * is the same for the I210's FLASH_HW and the I21[01]'s
		 * iNVM.
		 */
		nvmword = INVM_DEFAULT_AL;
	}
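	/* The autoload word with the PLL workaround bits OR'd in */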
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

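		/*
		 * If the PLL frequency field no longer reads back as the
		 * "unconfigured" pattern, the PLL came up correctly and
		 * the workaround can stop retrying.
		 */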
		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

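		/*
		 * Cycle the function through D3hot and back to D0 so the
		 * autoload word written above is picked up when the part
		 * comes back out of the low-power state.
		 */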
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

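	/*
	 * Ungate the side clock and keep the IOSF sideband clock running
	 * so legacy (INTx) interrupts are not lost while the link is
	 * idle; rationale inferred from the FEXTNVM7/FEXTNVM9 bit names.
	 */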
	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}

/* Sysctl functions */
static int
wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

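	/* Export the queue's current Tx descriptor head (TDH) read-only */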
	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

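	/* Export the queue's current Tx descriptor tail (TDT) read-only */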
	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

#ifdef WM_DEBUG
static int
wm_sysctl_debug(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;
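	/* Dump a few Tx-side registers whenever the debug flags change */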
	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));

	return 0;
}
#endif