/*	$NetBSD: if_wm.c,v 1.754 2022/08/08 07:44:40 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.754 2022/08/08 07:44:40 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
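/*
 * Illustrative usage sketch for DPRINTF (not part of the driver): the
 * second argument is a mask of WM_DEBUG_* categories and the third is a
 * fully parenthesized printf() argument list, which is how a
 * variable-length argument list is passed through a single macro
 * parameter.
 */
#if 0
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: link is %s\n", device_xname(sc->sc_dev),
		link ? "up" : "down"));
#endif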
    175 
    176 #ifdef NET_MPSAFE
    177 #define WM_MPSAFE	1
    178 #define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
    179 #define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
    180 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    181 #else
    182 #define WM_CALLOUT_FLAGS	0
    183 #define WM_SOFTINT_FLAGS	0
    184 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
    185 #endif
    186 
    187 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    188 
/*
 * The maximum number of interrupts this driver can use: one per queue
 * plus one for link status changes.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
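/*
 * Illustrative sketch (not part of the driver): because the descriptor
 * ring and job queue sizes are powers of two, "(x + 1) & (size - 1)"
 * advances an index with a plain AND instead of a modulo; e.g. with
 * 4096 descriptors, WM_NEXTTX(txq, 4095) wraps back to 0.  A typical
 * walk over the descriptors of one packet looks like:
 */
#if 0
	for (i = txs->txs_firstdesc; ; i = WM_NEXTTX(txq, i)) {
		/* ... examine txq->txq_descs[i] here ... */
		if (i == txs->txs_lastdesc)
			break;
	}
#endif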

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for
 * normal-sized packets.  Jumbo packets consume 5 Rx buffers for a
 * full-sized packet.  We allocate 256 receive descriptors, each with
 * a 2k buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
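/*
 * For reference (illustrative arithmetic, not from the datasheet): a
 * roughly 9018-byte jumbo frame spans 9018 / 2048 -> 5 MCLBYTES
 * buffers, and 256 / 5 leaves room for about 50 such frames per ring.
 */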
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
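/*
 * Expansion sketch (illustrative only): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * Note that the string literal is not macro-expanded, so every name
 * buffer has the same fixed size; it merely needs to be large enough
 * for the "%s%02d%s" name (e.g. "txq00txdw") that WM_Q_EVCNT_ATTACH()
 * snprintf()s into it before calling evcnt_attach_dynamic().
 */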

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
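/*
 * Illustrative sketch (not part of the driver): rxq_tailp always points
 * at the m_next field of the last mbuf in the chain (or at rxq_head when
 * the chain is empty), so appending a receive buffer is O(1) and never
 * walks the chain:
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL via *rxq_tailp */
	WM_RXCHAIN_LINK(rxq, m0);	/* rxq_head = m0 */
	WM_RXCHAIN_LINK(rxq, m1);	/* m0->m_next = m1 */
#endif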

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
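/*
 * Note (illustrative observation): the relaxed load/store pair above is
 * not an atomic read-modify-write, so concurrent updaters of the same
 * counter could still lose increments; each counter is expected to be
 * updated from one context at a time.  What the pair does guarantee is
 * that readers never observe a torn (half-written) 64-bit ev_count.
 */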

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
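/*
 * Illustrative sketch (register macro names are assumed here; see
 * if_wmreg.h for the real ones): the hardware takes a descriptor ring
 * base address as two 32-bit halves, which is what the _LO/_HI split
 * above is for, e.g.:
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
#endif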

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions:
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
   1304 	  WM_T_82571,		WMP_F_COPPER },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1307 	  "Intel PRO/1000 PF (82571EB)",
   1308 	  WM_T_82571,		WMP_F_FIBER },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1311 	  "Intel PRO/1000 PB (82571EB)",
   1312 	  WM_T_82571,		WMP_F_SERDES },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1315 	  "Intel PRO/1000 QT (82571EB)",
   1316 	  WM_T_82571,		WMP_F_COPPER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1319 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1320 	  WM_T_82571,		WMP_F_COPPER },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1323 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1324 	  WM_T_82571,		WMP_F_COPPER },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1327 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1328 	  WM_T_82571,		WMP_F_SERDES },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1331 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1332 	  WM_T_82571,		WMP_F_SERDES },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1335 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1336 	  WM_T_82571,		WMP_F_FIBER },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1339 	  "Intel i82572EI 1000baseT Ethernet",
   1340 	  WM_T_82572,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1343 	  "Intel i82572EI 1000baseX Ethernet",
   1344 	  WM_T_82572,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1347 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1348 	  WM_T_82572,		WMP_F_SERDES },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1351 	  "Intel i82572EI 1000baseT Ethernet",
   1352 	  WM_T_82572,		WMP_F_COPPER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1355 	  "Intel i82573E",
   1356 	  WM_T_82573,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1359 	  "Intel i82573E IAMT",
   1360 	  WM_T_82573,		WMP_F_COPPER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1363 	  "Intel i82573L Gigabit Ethernet",
   1364 	  WM_T_82573,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1367 	  "Intel i82574L",
   1368 	  WM_T_82574,		WMP_F_COPPER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1371 	  "Intel i82574L",
   1372 	  WM_T_82574,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1375 	  "Intel i82583V",
   1376 	  WM_T_82583,		WMP_F_COPPER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1379 	  "i80003 dual 1000baseT Ethernet",
   1380 	  WM_T_80003,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1383 	  "i80003 dual 1000baseX Ethernet",
   1384 	  WM_T_80003,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1387 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1388 	  WM_T_80003,		WMP_F_SERDES },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1391 	  "Intel i80003 1000baseT Ethernet",
   1392 	  WM_T_80003,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1395 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1396 	  WM_T_80003,		WMP_F_SERDES },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1399 	  "Intel i82801H (M_AMT) LAN Controller",
   1400 	  WM_T_ICH8,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1402 	  "Intel i82801H (AMT) LAN Controller",
   1403 	  WM_T_ICH8,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1405 	  "Intel i82801H LAN Controller",
   1406 	  WM_T_ICH8,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1408 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1409 	  WM_T_ICH8,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1411 	  "Intel i82801H (M) LAN Controller",
   1412 	  WM_T_ICH8,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1414 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1415 	  WM_T_ICH8,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1417 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1418 	  WM_T_ICH8,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1420 	  "82567V-3 LAN Controller",
   1421 	  WM_T_ICH8,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1423 	  "82801I (AMT) LAN Controller",
   1424 	  WM_T_ICH9,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1426 	  "82801I 10/100 LAN Controller",
   1427 	  WM_T_ICH9,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1429 	  "82801I (G) 10/100 LAN Controller",
   1430 	  WM_T_ICH9,		WMP_F_COPPER },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1432 	  "82801I (GT) 10/100 LAN Controller",
   1433 	  WM_T_ICH9,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1435 	  "82801I (C) LAN Controller",
   1436 	  WM_T_ICH9,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1438 	  "82801I mobile LAN Controller",
   1439 	  WM_T_ICH9,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1441 	  "82801I mobile (V) LAN Controller",
   1442 	  WM_T_ICH9,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1444 	  "82801I mobile (AMT) LAN Controller",
   1445 	  WM_T_ICH9,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1447 	  "82567LM-4 LAN Controller",
   1448 	  WM_T_ICH9,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1450 	  "82567LM-2 LAN Controller",
   1451 	  WM_T_ICH10,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1453 	  "82567LF-2 LAN Controller",
   1454 	  WM_T_ICH10,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1456 	  "82567LM-3 LAN Controller",
   1457 	  WM_T_ICH10,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1459 	  "82567LF-3 LAN Controller",
   1460 	  WM_T_ICH10,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1462 	  "82567V-2 LAN Controller",
   1463 	  WM_T_ICH10,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1465 	  "82567V-3? LAN Controller",
   1466 	  WM_T_ICH10,		WMP_F_COPPER },
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1468 	  "HANKSVILLE LAN Controller",
   1469 	  WM_T_ICH10,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1471 	  "PCH LAN (82577LM) Controller",
   1472 	  WM_T_PCH,		WMP_F_COPPER },
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1474 	  "PCH LAN (82577LC) Controller",
   1475 	  WM_T_PCH,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1477 	  "PCH LAN (82578DM) Controller",
   1478 	  WM_T_PCH,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1480 	  "PCH LAN (82578DC) Controller",
   1481 	  WM_T_PCH,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1483 	  "PCH2 LAN (82579LM) Controller",
   1484 	  WM_T_PCH2,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1486 	  "PCH2 LAN (82579V) Controller",
   1487 	  WM_T_PCH2,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1489 	  "82575EB dual-1000baseT Ethernet",
   1490 	  WM_T_82575,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1492 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1493 	  WM_T_82575,		WMP_F_SERDES },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1495 	  "82575GB quad-1000baseT Ethernet",
   1496 	  WM_T_82575,		WMP_F_COPPER },
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1498 	  "82575GB quad-1000baseT Ethernet (PM)",
   1499 	  WM_T_82575,		WMP_F_COPPER },
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1501 	  "82576 1000BaseT Ethernet",
   1502 	  WM_T_82576,		WMP_F_COPPER },
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1504 	  "82576 1000BaseX Ethernet",
   1505 	  WM_T_82576,		WMP_F_FIBER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1508 	  "82576 gigabit Ethernet (SERDES)",
   1509 	  WM_T_82576,		WMP_F_SERDES },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1512 	  "82576 quad-1000BaseT Ethernet",
   1513 	  WM_T_82576,		WMP_F_COPPER },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1516 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1517 	  WM_T_82576,		WMP_F_COPPER },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1520 	  "82576 gigabit Ethernet",
   1521 	  WM_T_82576,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1524 	  "82576 gigabit Ethernet (SERDES)",
   1525 	  WM_T_82576,		WMP_F_SERDES },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1527 	  "82576 quad-gigabit Ethernet (SERDES)",
   1528 	  WM_T_82576,		WMP_F_SERDES },
   1529 
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1531 	  "82580 1000BaseT Ethernet",
   1532 	  WM_T_82580,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1534 	  "82580 1000BaseX Ethernet",
   1535 	  WM_T_82580,		WMP_F_FIBER },
   1536 
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1538 	  "82580 1000BaseT Ethernet (SERDES)",
   1539 	  WM_T_82580,		WMP_F_SERDES },
   1540 
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1542 	  "82580 gigabit Ethernet (SGMII)",
   1543 	  WM_T_82580,		WMP_F_COPPER },
   1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1545 	  "82580 dual-1000BaseT Ethernet",
   1546 	  WM_T_82580,		WMP_F_COPPER },
   1547 
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1549 	  "82580 quad-1000BaseX Ethernet",
   1550 	  WM_T_82580,		WMP_F_FIBER },
   1551 
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1553 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1554 	  WM_T_82580,		WMP_F_COPPER },
   1555 
   1556 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1557 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1558 	  WM_T_82580,		WMP_F_SERDES },
   1559 
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1561 	  "DH89XXCC 1000BASE-KX Ethernet",
   1562 	  WM_T_82580,		WMP_F_SERDES },
   1563 
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1565 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1566 	  WM_T_82580,		WMP_F_SERDES },
   1567 
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1569 	  "I350 Gigabit Network Connection",
   1570 	  WM_T_I350,		WMP_F_COPPER },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1573 	  "I350 Gigabit Fiber Network Connection",
   1574 	  WM_T_I350,		WMP_F_FIBER },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1577 	  "I350 Gigabit Backplane Connection",
   1578 	  WM_T_I350,		WMP_F_SERDES },
   1579 
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1581 	  "I350 Quad Port Gigabit Ethernet",
   1582 	  WM_T_I350,		WMP_F_SERDES },
   1583 
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1585 	  "I350 Gigabit Connection",
   1586 	  WM_T_I350,		WMP_F_COPPER },
   1587 
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1589 	  "I354 Gigabit Ethernet (KX)",
   1590 	  WM_T_I354,		WMP_F_SERDES },
   1591 
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1593 	  "I354 Gigabit Ethernet (SGMII)",
   1594 	  WM_T_I354,		WMP_F_COPPER },
   1595 
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1597 	  "I354 Gigabit Ethernet (2.5G)",
   1598 	  WM_T_I354,		WMP_F_COPPER },
   1599 
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1601 	  "I210-T1 Ethernet Server Adapter",
   1602 	  WM_T_I210,		WMP_F_COPPER },
   1603 
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1605 	  "I210 Ethernet (Copper OEM)",
   1606 	  WM_T_I210,		WMP_F_COPPER },
   1607 
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1609 	  "I210 Ethernet (Copper IT)",
   1610 	  WM_T_I210,		WMP_F_COPPER },
   1611 
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1613 	  "I210 Ethernet (Copper, FLASH less)",
   1614 	  WM_T_I210,		WMP_F_COPPER },
   1615 
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1617 	  "I210 Gigabit Ethernet (Fiber)",
   1618 	  WM_T_I210,		WMP_F_FIBER },
   1619 
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1621 	  "I210 Gigabit Ethernet (SERDES)",
   1622 	  WM_T_I210,		WMP_F_SERDES },
   1623 
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1625 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1626 	  WM_T_I210,		WMP_F_SERDES },
   1627 
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1629 	  "I210 Gigabit Ethernet (SGMII)",
   1630 	  WM_T_I210,		WMP_F_COPPER },
   1631 
   1632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1633 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1634 	  WM_T_I210,		WMP_F_COPPER },
   1635 
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1637 	  "I211 Ethernet (COPPER)",
   1638 	  WM_T_I211,		WMP_F_COPPER },
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1640 	  "I217 V Ethernet Connection",
   1641 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1643 	  "I217 LM Ethernet Connection",
   1644 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1646 	  "I218 V Ethernet Connection",
   1647 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1649 	  "I218 V Ethernet Connection",
   1650 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1652 	  "I218 V Ethernet Connection",
   1653 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1655 	  "I218 LM Ethernet Connection",
   1656 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1658 	  "I218 LM Ethernet Connection",
   1659 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1661 	  "I218 LM Ethernet Connection",
   1662 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1664 	  "I219 LM Ethernet Connection",
   1665 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1667 	  "I219 LM (2) Ethernet Connection",
   1668 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1670 	  "I219 LM (3) Ethernet Connection",
   1671 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1673 	  "I219 LM (4) Ethernet Connection",
   1674 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1675 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1676 	  "I219 LM (5) Ethernet Connection",
   1677 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1679 	  "I219 LM (6) Ethernet Connection",
   1680 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1682 	  "I219 LM (7) Ethernet Connection",
   1683 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1685 	  "I219 LM (8) Ethernet Connection",
   1686 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1687 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1688 	  "I219 LM (9) Ethernet Connection",
   1689 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1691 	  "I219 LM (10) Ethernet Connection",
   1692 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1694 	  "I219 LM (11) Ethernet Connection",
   1695 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1697 	  "I219 LM (12) Ethernet Connection",
   1698 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1699 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1700 	  "I219 LM (13) Ethernet Connection",
   1701 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1703 	  "I219 LM (14) Ethernet Connection",
   1704 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1706 	  "I219 LM (15) Ethernet Connection",
   1707 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1709 	  "I219 LM (16) Ethernet Connection",
   1710 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1711 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1712 	  "I219 LM (17) Ethernet Connection",
   1713 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1714 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1715 	  "I219 LM (18) Ethernet Connection",
   1716 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1718 	  "I219 LM (19) Ethernet Connection",
   1719 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1720 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1721 	  "I219 V Ethernet Connection",
   1722 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1723 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1724 	  "I219 V (2) Ethernet Connection",
   1725 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1726 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1727 	  "I219 V (4) Ethernet Connection",
   1728 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1730 	  "I219 V (5) Ethernet Connection",
   1731 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1732 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1733 	  "I219 V (6) Ethernet Connection",
   1734 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1735 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1736 	  "I219 V (7) Ethernet Connection",
   1737 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1738 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1739 	  "I219 V (8) Ethernet Connection",
   1740 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1742 	  "I219 V (9) Ethernet Connection",
   1743 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1744 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1745 	  "I219 V (10) Ethernet Connection",
   1746 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1747 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1748 	  "I219 V (11) Ethernet Connection",
   1749 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1750 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1751 	  "I219 V (12) Ethernet Connection",
   1752 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1754 	  "I219 V (13) Ethernet Connection",
   1755 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1756 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1757 	  "I219 V (14) Ethernet Connection",
   1758 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1759 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1760 	  "I219 V (15) Ethernet Connection",
   1761 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1762 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1763 	  "I219 V (16) Ethernet Connection",
   1764 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1766 	  "I219 V (17) Ethernet Connection",
   1767 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1768 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1769 	  "I219 V (18) Ethernet Connection",
   1770 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1771 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1772 	  "I219 V (19) Ethernet Connection",
   1773 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1774 	{ 0,			0,
   1775 	  NULL,
   1776 	  0,			0 },
   1777 };
   1778 
   1779 /*
   1780  * Register read/write functions.
   1781  * Other than CSR_{READ|WRITE}().
   1782  */
   1783 
   1784 #if 0 /* Not currently used */
   1785 static inline uint32_t
   1786 wm_io_read(struct wm_softc *sc, int reg)
   1787 {
   1788 
   1789 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1790 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1791 }
   1792 #endif
   1793 
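         /*
          * A note on the helpers here: I/O-mapped register access is a
          * two-step window.  The target register offset is first written
          * at I/O offset 0, and the data is then read or written at I/O
          * offset 4 (the IOADDR/IODATA pair in Intel's 8254x
          * documentation).
          */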
   1794 static inline void
   1795 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1796 {
   1797 
   1798 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1799 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1800 }
   1801 
   1802 static inline void
   1803 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1804     uint32_t data)
   1805 {
   1806 	uint32_t regval;
   1807 	int i;
   1808 
   1809 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1810 
   1811 	CSR_WRITE(sc, reg, regval);
   1812 
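         	/*
         	 * Busy-wait for the controller to latch the write: poll the
         	 * ready bit every 5us, giving up after SCTL_CTL_POLL_TIMEOUT
         	 * iterations and warning below if it never asserted.
         	 */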
   1813 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1814 		delay(5);
   1815 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1816 			break;
   1817 	}
   1818 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1819 		aprint_error("%s: WARNING:"
   1820 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1821 		    device_xname(sc->sc_dev), reg);
   1822 	}
   1823 }
   1824 
   1825 static inline void
   1826 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1827 {
   1828 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1829 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1830 }
   1831 
   1832 /*
   1833  * Descriptor sync/init functions.
   1834  */
   1835 static inline void
   1836 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1837 {
   1838 	struct wm_softc *sc = txq->txq_sc;
   1839 
   1840 	/* If it will wrap around, sync to the end of the ring. */
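         	/*
         	 * A worked example: with WM_NTXDESC(txq) == 256, start == 250
         	 * and num == 10, this first bus_dmamap_sync() covers
         	 * descriptors 250..255, and the second one below then covers
         	 * descriptors 0..3.
         	 */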
   1841 	if ((start + num) > WM_NTXDESC(txq)) {
   1842 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1843 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1844 		    (WM_NTXDESC(txq) - start), ops);
   1845 		num -= (WM_NTXDESC(txq) - start);
   1846 		start = 0;
   1847 	}
   1848 
   1849 	/* Now sync whatever is left. */
   1850 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1851 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1852 }
   1853 
   1854 static inline void
   1855 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1856 {
   1857 	struct wm_softc *sc = rxq->rxq_sc;
   1858 
   1859 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1860 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1861 }
   1862 
   1863 static inline void
   1864 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1865 {
   1866 	struct wm_softc *sc = rxq->rxq_sc;
   1867 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1868 	struct mbuf *m = rxs->rxs_mbuf;
   1869 
   1870 	/*
   1871 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1872 	 * so that the payload after the Ethernet header is aligned
   1873 	 * to a 4-byte boundary.
    1874 	 *
   1875 	 * XXX BRAINDAMAGE ALERT!
   1876 	 * The stupid chip uses the same size for every buffer, which
   1877 	 * is set in the Receive Control register.  We are using the 2K
   1878 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1879 	 * reason, we can't "scoot" packets longer than the standard
   1880 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1881 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1882 	 * the upper layer copy the headers.
   1883 	 */
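         	/*
         	 * Concretely: with sc_align_tweak == 2, the 14-byte Ethernet
         	 * header occupies buffer offsets 2..15, so the IP header that
         	 * follows starts at offset 16, a 4-byte boundary.
         	 */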
   1884 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1885 
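         	/*
         	 * Three descriptor layouts are handled below: the 82574 uses
         	 * extended descriptors, chips with WM_F_NEWQUEUE (82575 and
         	 * newer) use the "nq" advanced format, and everything else
         	 * uses the original wiseman layout.
         	 */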
   1886 	if (sc->sc_type == WM_T_82574) {
   1887 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1888 		rxd->erx_data.erxd_addr =
   1889 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1890 		rxd->erx_data.erxd_dd = 0;
   1891 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1892 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1893 
   1894 		rxd->nqrx_data.nrxd_paddr =
   1895 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1896 		/* Currently, split header is not supported. */
   1897 		rxd->nqrx_data.nrxd_haddr = 0;
   1898 	} else {
   1899 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1900 
   1901 		wm_set_dma_addr(&rxd->wrx_addr,
   1902 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1903 		rxd->wrx_len = 0;
   1904 		rxd->wrx_cksum = 0;
   1905 		rxd->wrx_status = 0;
   1906 		rxd->wrx_errors = 0;
   1907 		rxd->wrx_special = 0;
   1908 	}
   1909 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1910 
   1911 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1912 }
   1913 
   1914 /*
   1915  * Device driver interface functions and commonly used functions.
   1916  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1917  */
   1918 
   1919 /* Lookup supported device table */
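         /*
          * The linear search below relies on the all-zero entry that
          * terminates wm_products[]: its wmp_name is NULL, which ends the
          * loop when no match is found.
          */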
   1920 static const struct wm_product *
   1921 wm_lookup(const struct pci_attach_args *pa)
   1922 {
   1923 	const struct wm_product *wmp;
   1924 
   1925 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1926 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1927 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1928 			return wmp;
   1929 	}
   1930 	return NULL;
   1931 }
   1932 
   1933 /* The match function (ca_match) */
   1934 static int
   1935 wm_match(device_t parent, cfdata_t cf, void *aux)
   1936 {
   1937 	struct pci_attach_args *pa = aux;
   1938 
   1939 	if (wm_lookup(pa) != NULL)
   1940 		return 1;
   1941 
   1942 	return 0;
   1943 }
   1944 
   1945 /* The attach function (ca_attach) */
   1946 static void
   1947 wm_attach(device_t parent, device_t self, void *aux)
   1948 {
   1949 	struct wm_softc *sc = device_private(self);
   1950 	struct pci_attach_args *pa = aux;
   1951 	prop_dictionary_t dict;
   1952 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1953 	pci_chipset_tag_t pc = pa->pa_pc;
   1954 	int counts[PCI_INTR_TYPE_SIZE];
   1955 	pci_intr_type_t max_type;
   1956 	const char *eetype, *xname;
   1957 	bus_space_tag_t memt;
   1958 	bus_space_handle_t memh;
   1959 	bus_size_t memsize;
   1960 	int memh_valid;
   1961 	int i, error;
   1962 	const struct wm_product *wmp;
   1963 	prop_data_t ea;
   1964 	prop_number_t pn;
   1965 	uint8_t enaddr[ETHER_ADDR_LEN];
   1966 	char buf[256];
   1967 	char wqname[MAXCOMLEN];
   1968 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1969 	pcireg_t preg, memtype;
   1970 	uint16_t eeprom_data, apme_mask;
   1971 	bool force_clear_smbi;
   1972 	uint32_t link_mode;
   1973 	uint32_t reg;
   1974 
   1975 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1976 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1977 #endif
   1978 	sc->sc_dev = self;
   1979 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1980 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1981 	sc->sc_core_stopping = false;
   1982 
   1983 	wmp = wm_lookup(pa);
   1984 #ifdef DIAGNOSTIC
   1985 	if (wmp == NULL) {
   1986 		printf("\n");
   1987 		panic("wm_attach: impossible");
   1988 	}
   1989 #endif
   1990 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1991 
   1992 	sc->sc_pc = pa->pa_pc;
   1993 	sc->sc_pcitag = pa->pa_tag;
   1994 
   1995 	if (pci_dma64_available(pa)) {
   1996 		aprint_verbose(", 64-bit DMA");
   1997 		sc->sc_dmat = pa->pa_dmat64;
   1998 	} else {
   1999 		aprint_verbose(", 32-bit DMA");
   2000 		sc->sc_dmat = pa->pa_dmat;
   2001 	}
   2002 
   2003 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2004 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2005 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2006 
   2007 	sc->sc_type = wmp->wmp_type;
   2008 
   2009 	/* Set default function pointers */
   2010 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2011 	sc->phy.release = sc->nvm.release = wm_put_null;
   2012 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2013 
   2014 	if (sc->sc_type < WM_T_82543) {
   2015 		if (sc->sc_rev < 2) {
   2016 			aprint_error_dev(sc->sc_dev,
   2017 			    "i82542 must be at least rev. 2\n");
   2018 			return;
   2019 		}
   2020 		if (sc->sc_rev < 3)
   2021 			sc->sc_type = WM_T_82542_2_0;
   2022 	}
   2023 
   2024 	/*
   2025 	 * Disable MSI for Errata:
   2026 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2027 	 *
   2028 	 *  82544: Errata 25
   2029 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2030 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2031 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2032 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2033 	 *
   2034 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2035 	 *
   2036 	 *  82571 & 82572: Errata 63
   2037 	 */
   2038 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2039 	    || (sc->sc_type == WM_T_82572))
   2040 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2041 
   2042 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2043 	    || (sc->sc_type == WM_T_82580)
   2044 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2045 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2046 		sc->sc_flags |= WM_F_NEWQUEUE;
   2047 
   2048 	/* Set device properties (mactype) */
   2049 	dict = device_properties(sc->sc_dev);
   2050 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2051 
   2052 	/*
    2053 	 * Map the device.  All devices support memory-mapped access,
   2054 	 * and it is really required for normal operation.
   2055 	 */
   2056 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2057 	switch (memtype) {
   2058 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2059 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2060 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2061 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2062 		break;
   2063 	default:
   2064 		memh_valid = 0;
   2065 		break;
   2066 	}
   2067 
   2068 	if (memh_valid) {
   2069 		sc->sc_st = memt;
   2070 		sc->sc_sh = memh;
   2071 		sc->sc_ss = memsize;
   2072 	} else {
   2073 		aprint_error_dev(sc->sc_dev,
   2074 		    "unable to map device registers\n");
   2075 		return;
   2076 	}
   2077 
   2078 	/*
   2079 	 * In addition, i82544 and later support I/O mapped indirect
   2080 	 * register access.  It is not desirable (nor supported in
   2081 	 * this driver) to use it for normal operation, though it is
   2082 	 * required to work around bugs in some chip versions.
   2083 	 */
   2084 	switch (sc->sc_type) {
   2085 	case WM_T_82544:
   2086 	case WM_T_82541:
   2087 	case WM_T_82541_2:
   2088 	case WM_T_82547:
   2089 	case WM_T_82547_2:
   2090 		/* First we have to find the I/O BAR. */
   2091 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2092 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2093 			if (memtype == PCI_MAPREG_TYPE_IO)
   2094 				break;
   2095 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2096 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2097 				i += 4;	/* skip high bits, too */
   2098 		}
   2099 		if (i < PCI_MAPREG_END) {
   2100 			/*
    2101 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2102 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2103 			 * That's no problem, because those newer chips
    2104 			 * don't have this bug.
   2105 			 *
    2106 			 * The i8254x apparently doesn't respond when the
    2107 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2108 			 * been configured.
   2109 			 */
   2110 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2111 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2112 				aprint_error_dev(sc->sc_dev,
   2113 				    "WARNING: I/O BAR at zero.\n");
   2114 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2115 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2116 			    == 0) {
   2117 				sc->sc_flags |= WM_F_IOH_VALID;
   2118 			} else
   2119 				aprint_error_dev(sc->sc_dev,
   2120 				    "WARNING: unable to map I/O space\n");
   2121 		}
   2122 		break;
   2123 	default:
   2124 		break;
   2125 	}
   2126 
   2127 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2128 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2129 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2130 	if (sc->sc_type < WM_T_82542_2_1)
   2131 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2132 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2133 
   2134 	/* Power up chip */
   2135 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2136 	    && error != EOPNOTSUPP) {
   2137 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2138 		return;
   2139 	}
   2140 
   2141 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2142 	/*
    2143 	 * To save interrupt resources, don't use MSI-X if we can use
    2144 	 * only one queue.
   2145 	 */
   2146 	if (sc->sc_nqueues > 1) {
   2147 		max_type = PCI_INTR_TYPE_MSIX;
   2148 		/*
    2149 		 * The 82583 has an MSI-X capability in the PCI
    2150 		 * configuration space but doesn't actually support it; at
    2151 		 * least the documentation says nothing about MSI-X.
   2152 		 */
   2153 		counts[PCI_INTR_TYPE_MSIX]
   2154 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2155 	} else {
   2156 		max_type = PCI_INTR_TYPE_MSI;
   2157 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2158 	}
   2159 
   2160 	/* Allocation settings */
   2161 	counts[PCI_INTR_TYPE_MSI] = 1;
   2162 	counts[PCI_INTR_TYPE_INTX] = 1;
   2163 	/* overridden by disable flags */
   2164 	if (wm_disable_msi != 0) {
   2165 		counts[PCI_INTR_TYPE_MSI] = 0;
   2166 		if (wm_disable_msix != 0) {
   2167 			max_type = PCI_INTR_TYPE_INTX;
   2168 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2169 		}
   2170 	} else if (wm_disable_msix != 0) {
   2171 		max_type = PCI_INTR_TYPE_MSI;
   2172 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2173 	}
   2174 
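         	/*
         	 * The allocation below falls back in steps: if MSI-X setup
         	 * fails, the vectors are released and we retry with MSI as
         	 * the maximum type; if MSI setup fails, we retry once more
         	 * with INTx.
         	 */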
   2175 alloc_retry:
   2176 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2177 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2178 		return;
   2179 	}
   2180 
   2181 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2182 		error = wm_setup_msix(sc);
   2183 		if (error) {
   2184 			pci_intr_release(pc, sc->sc_intrs,
   2185 			    counts[PCI_INTR_TYPE_MSIX]);
   2186 
   2187 			/* Setup for MSI: Disable MSI-X */
   2188 			max_type = PCI_INTR_TYPE_MSI;
   2189 			counts[PCI_INTR_TYPE_MSI] = 1;
   2190 			counts[PCI_INTR_TYPE_INTX] = 1;
   2191 			goto alloc_retry;
   2192 		}
   2193 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2194 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2195 		error = wm_setup_legacy(sc);
   2196 		if (error) {
   2197 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2198 			    counts[PCI_INTR_TYPE_MSI]);
   2199 
   2200 			/* The next try is for INTx: Disable MSI */
   2201 			max_type = PCI_INTR_TYPE_INTX;
   2202 			counts[PCI_INTR_TYPE_INTX] = 1;
   2203 			goto alloc_retry;
   2204 		}
   2205 	} else {
   2206 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2207 		error = wm_setup_legacy(sc);
   2208 		if (error) {
   2209 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2210 			    counts[PCI_INTR_TYPE_INTX]);
   2211 			return;
   2212 		}
   2213 	}
   2214 
   2215 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2216 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2217 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2218 	    WM_WORKQUEUE_FLAGS);
   2219 	if (error) {
   2220 		aprint_error_dev(sc->sc_dev,
   2221 		    "unable to create workqueue\n");
   2222 		goto out;
   2223 	}
   2224 
   2225 	/*
   2226 	 * Check the function ID (unit number of the chip).
   2227 	 */
   2228 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2229 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2230 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2231 	    || (sc->sc_type == WM_T_82580)
   2232 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2233 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2234 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2235 	else
   2236 		sc->sc_funcid = 0;
   2237 
   2238 	/*
   2239 	 * Determine a few things about the bus we're connected to.
   2240 	 */
   2241 	if (sc->sc_type < WM_T_82543) {
   2242 		/* We don't really know the bus characteristics here. */
   2243 		sc->sc_bus_speed = 33;
   2244 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2245 		/*
    2246 		 * CSA (Communication Streaming Architecture) is about as
    2247 		 * fast as a 32-bit 66MHz PCI bus.
   2248 		 */
   2249 		sc->sc_flags |= WM_F_CSA;
   2250 		sc->sc_bus_speed = 66;
   2251 		aprint_verbose_dev(sc->sc_dev,
   2252 		    "Communication Streaming Architecture\n");
   2253 		if (sc->sc_type == WM_T_82547) {
   2254 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2255 			callout_setfunc(&sc->sc_txfifo_ch,
   2256 			    wm_82547_txfifo_stall, sc);
   2257 			aprint_verbose_dev(sc->sc_dev,
   2258 			    "using 82547 Tx FIFO stall work-around\n");
   2259 		}
   2260 	} else if (sc->sc_type >= WM_T_82571) {
   2261 		sc->sc_flags |= WM_F_PCIE;
   2262 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2263 		    && (sc->sc_type != WM_T_ICH10)
   2264 		    && (sc->sc_type != WM_T_PCH)
   2265 		    && (sc->sc_type != WM_T_PCH2)
   2266 		    && (sc->sc_type != WM_T_PCH_LPT)
   2267 		    && (sc->sc_type != WM_T_PCH_SPT)
   2268 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2269 			/* ICH* and PCH* have no PCIe capability registers */
   2270 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2271 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2272 				NULL) == 0)
   2273 				aprint_error_dev(sc->sc_dev,
   2274 				    "unable to find PCIe capability\n");
   2275 		}
   2276 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2277 	} else {
   2278 		reg = CSR_READ(sc, WMREG_STATUS);
   2279 		if (reg & STATUS_BUS64)
   2280 			sc->sc_flags |= WM_F_BUS64;
   2281 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2282 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2283 
   2284 			sc->sc_flags |= WM_F_PCIX;
   2285 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2286 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2287 				aprint_error_dev(sc->sc_dev,
   2288 				    "unable to find PCIX capability\n");
   2289 			else if (sc->sc_type != WM_T_82545_3 &&
   2290 				 sc->sc_type != WM_T_82546_3) {
   2291 				/*
   2292 				 * Work around a problem caused by the BIOS
   2293 				 * setting the max memory read byte count
   2294 				 * incorrectly.
   2295 				 */
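         				/*
         				 * The MMRBC field is a power-of-two
         				 * encoding: a value of n means
         				 * 512 << n bytes, so 0..3 map to
         				 * 512, 1024, 2048 and 4096.  Clamp
         				 * the command value to the maximum
         				 * the status register advertises.
         				 */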
   2296 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2297 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2298 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2299 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2300 
   2301 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2302 				    PCIX_CMD_BYTECNT_SHIFT;
   2303 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2304 				    PCIX_STATUS_MAXB_SHIFT;
   2305 				if (bytecnt > maxb) {
   2306 					aprint_verbose_dev(sc->sc_dev,
   2307 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2308 					    512 << bytecnt, 512 << maxb);
   2309 					pcix_cmd = (pcix_cmd &
   2310 					    ~PCIX_CMD_BYTECNT_MASK) |
   2311 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2312 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2313 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2314 					    pcix_cmd);
   2315 				}
   2316 			}
   2317 		}
   2318 		/*
   2319 		 * The quad port adapter is special; it has a PCIX-PCIX
   2320 		 * bridge on the board, and can run the secondary bus at
   2321 		 * a higher speed.
   2322 		 */
   2323 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2324 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2325 								      : 66;
   2326 		} else if (sc->sc_flags & WM_F_PCIX) {
   2327 			switch (reg & STATUS_PCIXSPD_MASK) {
   2328 			case STATUS_PCIXSPD_50_66:
   2329 				sc->sc_bus_speed = 66;
   2330 				break;
   2331 			case STATUS_PCIXSPD_66_100:
   2332 				sc->sc_bus_speed = 100;
   2333 				break;
   2334 			case STATUS_PCIXSPD_100_133:
   2335 				sc->sc_bus_speed = 133;
   2336 				break;
   2337 			default:
   2338 				aprint_error_dev(sc->sc_dev,
   2339 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2340 				    reg & STATUS_PCIXSPD_MASK);
   2341 				sc->sc_bus_speed = 66;
   2342 				break;
   2343 			}
   2344 		} else
   2345 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2346 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2347 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2348 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2349 	}
   2350 
   2351 	/* clear interesting stat counters */
   2352 	CSR_READ(sc, WMREG_COLC);
   2353 	CSR_READ(sc, WMREG_RXERRC);
   2354 
   2355 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2356 	    || (sc->sc_type >= WM_T_ICH8))
   2357 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2358 	if (sc->sc_type >= WM_T_ICH8)
   2359 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2360 
   2361 	/* Set PHY, NVM mutex related stuff */
   2362 	switch (sc->sc_type) {
   2363 	case WM_T_82542_2_0:
   2364 	case WM_T_82542_2_1:
   2365 	case WM_T_82543:
   2366 	case WM_T_82544:
   2367 		/* Microwire */
   2368 		sc->nvm.read = wm_nvm_read_uwire;
   2369 		sc->sc_nvm_wordsize = 64;
   2370 		sc->sc_nvm_addrbits = 6;
   2371 		break;
   2372 	case WM_T_82540:
   2373 	case WM_T_82545:
   2374 	case WM_T_82545_3:
   2375 	case WM_T_82546:
   2376 	case WM_T_82546_3:
   2377 		/* Microwire */
   2378 		sc->nvm.read = wm_nvm_read_uwire;
   2379 		reg = CSR_READ(sc, WMREG_EECD);
   2380 		if (reg & EECD_EE_SIZE) {
   2381 			sc->sc_nvm_wordsize = 256;
   2382 			sc->sc_nvm_addrbits = 8;
   2383 		} else {
   2384 			sc->sc_nvm_wordsize = 64;
   2385 			sc->sc_nvm_addrbits = 6;
   2386 		}
   2387 		sc->sc_flags |= WM_F_LOCK_EECD;
   2388 		sc->nvm.acquire = wm_get_eecd;
   2389 		sc->nvm.release = wm_put_eecd;
   2390 		break;
   2391 	case WM_T_82541:
   2392 	case WM_T_82541_2:
   2393 	case WM_T_82547:
   2394 	case WM_T_82547_2:
   2395 		reg = CSR_READ(sc, WMREG_EECD);
   2396 		/*
    2397 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2398 		 * the 8254[17], so set flags and functions before calling it.
   2399 		 */
   2400 		sc->sc_flags |= WM_F_LOCK_EECD;
   2401 		sc->nvm.acquire = wm_get_eecd;
   2402 		sc->nvm.release = wm_put_eecd;
   2403 		if (reg & EECD_EE_TYPE) {
   2404 			/* SPI */
   2405 			sc->nvm.read = wm_nvm_read_spi;
   2406 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2407 			wm_nvm_set_addrbits_size_eecd(sc);
   2408 		} else {
   2409 			/* Microwire */
   2410 			sc->nvm.read = wm_nvm_read_uwire;
   2411 			if ((reg & EECD_EE_ABITS) != 0) {
   2412 				sc->sc_nvm_wordsize = 256;
   2413 				sc->sc_nvm_addrbits = 8;
   2414 			} else {
   2415 				sc->sc_nvm_wordsize = 64;
   2416 				sc->sc_nvm_addrbits = 6;
   2417 			}
   2418 		}
   2419 		break;
   2420 	case WM_T_82571:
   2421 	case WM_T_82572:
   2422 		/* SPI */
   2423 		sc->nvm.read = wm_nvm_read_eerd;
    2424 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2425 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2426 		wm_nvm_set_addrbits_size_eecd(sc);
   2427 		sc->phy.acquire = wm_get_swsm_semaphore;
   2428 		sc->phy.release = wm_put_swsm_semaphore;
   2429 		sc->nvm.acquire = wm_get_nvm_82571;
   2430 		sc->nvm.release = wm_put_nvm_82571;
   2431 		break;
   2432 	case WM_T_82573:
   2433 	case WM_T_82574:
   2434 	case WM_T_82583:
   2435 		sc->nvm.read = wm_nvm_read_eerd;
    2436 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2437 		if (sc->sc_type == WM_T_82573) {
   2438 			sc->phy.acquire = wm_get_swsm_semaphore;
   2439 			sc->phy.release = wm_put_swsm_semaphore;
   2440 			sc->nvm.acquire = wm_get_nvm_82571;
   2441 			sc->nvm.release = wm_put_nvm_82571;
   2442 		} else {
   2443 			/* Both PHY and NVM use the same semaphore. */
   2444 			sc->phy.acquire = sc->nvm.acquire
   2445 			    = wm_get_swfwhw_semaphore;
   2446 			sc->phy.release = sc->nvm.release
   2447 			    = wm_put_swfwhw_semaphore;
   2448 		}
   2449 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2450 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2451 			sc->sc_nvm_wordsize = 2048;
   2452 		} else {
   2453 			/* SPI */
   2454 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2455 			wm_nvm_set_addrbits_size_eecd(sc);
   2456 		}
   2457 		break;
   2458 	case WM_T_82575:
   2459 	case WM_T_82576:
   2460 	case WM_T_82580:
   2461 	case WM_T_I350:
   2462 	case WM_T_I354:
   2463 	case WM_T_80003:
   2464 		/* SPI */
   2465 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2466 		wm_nvm_set_addrbits_size_eecd(sc);
   2467 		if ((sc->sc_type == WM_T_80003)
   2468 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2469 			sc->nvm.read = wm_nvm_read_eerd;
   2470 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2471 		} else {
   2472 			sc->nvm.read = wm_nvm_read_spi;
   2473 			sc->sc_flags |= WM_F_LOCK_EECD;
   2474 		}
   2475 		sc->phy.acquire = wm_get_phy_82575;
   2476 		sc->phy.release = wm_put_phy_82575;
   2477 		sc->nvm.acquire = wm_get_nvm_80003;
   2478 		sc->nvm.release = wm_put_nvm_80003;
   2479 		break;
   2480 	case WM_T_ICH8:
   2481 	case WM_T_ICH9:
   2482 	case WM_T_ICH10:
   2483 	case WM_T_PCH:
   2484 	case WM_T_PCH2:
   2485 	case WM_T_PCH_LPT:
   2486 		sc->nvm.read = wm_nvm_read_ich8;
   2487 		/* FLASH */
   2488 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2489 		sc->sc_nvm_wordsize = 2048;
   2490 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2491 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2492 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2493 			aprint_error_dev(sc->sc_dev,
   2494 			    "can't map FLASH registers\n");
   2495 			goto out;
   2496 		}
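         		/*
         		 * GFPREG holds the first sector of the NVM region in
         		 * its low bits and the last sector in the field at bit
         		 * 16.  The computation below turns that sector span into
         		 * bytes (ICH_FLASH_SECTOR_SIZE each) and then into
         		 * 16-bit words per bank, assuming the region holds two
         		 * banks.
         		 */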
   2497 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2498 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2499 		    ICH_FLASH_SECTOR_SIZE;
   2500 		sc->sc_ich8_flash_bank_size =
   2501 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2502 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2503 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2504 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2505 		sc->sc_flashreg_offset = 0;
   2506 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2507 		sc->phy.release = wm_put_swflag_ich8lan;
   2508 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2509 		sc->nvm.release = wm_put_nvm_ich8lan;
   2510 		break;
   2511 	case WM_T_PCH_SPT:
   2512 	case WM_T_PCH_CNP:
   2513 		sc->nvm.read = wm_nvm_read_spt;
   2514 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2515 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2516 		sc->sc_flasht = sc->sc_st;
   2517 		sc->sc_flashh = sc->sc_sh;
   2518 		sc->sc_ich8_flash_base = 0;
   2519 		sc->sc_nvm_wordsize =
   2520 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2521 		    * NVM_SIZE_MULTIPLIER;
    2522 		/* That is the size in bytes; we want words */
   2523 		sc->sc_nvm_wordsize /= 2;
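         		/*
         		 * For example, a strap field of 7 gives (7 + 1) *
         		 * NVM_SIZE_MULTIPLIER bytes, i.e. half that many 16-bit
         		 * words; the bank size computed below is then half of
         		 * that again.
         		 */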
   2524 		/* Assume 2 banks */
   2525 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2526 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2527 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2528 		sc->phy.release = wm_put_swflag_ich8lan;
   2529 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2530 		sc->nvm.release = wm_put_nvm_ich8lan;
   2531 		break;
   2532 	case WM_T_I210:
   2533 	case WM_T_I211:
    2534 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2535 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2536 		if (wm_nvm_flash_presence_i210(sc)) {
   2537 			sc->nvm.read = wm_nvm_read_eerd;
   2538 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2539 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2540 			wm_nvm_set_addrbits_size_eecd(sc);
   2541 		} else {
   2542 			sc->nvm.read = wm_nvm_read_invm;
   2543 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2544 			sc->sc_nvm_wordsize = INVM_SIZE;
   2545 		}
   2546 		sc->phy.acquire = wm_get_phy_82575;
   2547 		sc->phy.release = wm_put_phy_82575;
   2548 		sc->nvm.acquire = wm_get_nvm_80003;
   2549 		sc->nvm.release = wm_put_nvm_80003;
   2550 		break;
   2551 	default:
   2552 		break;
   2553 	}
   2554 
   2555 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2556 	switch (sc->sc_type) {
   2557 	case WM_T_82571:
   2558 	case WM_T_82572:
   2559 		reg = CSR_READ(sc, WMREG_SWSM2);
   2560 		if ((reg & SWSM2_LOCK) == 0) {
   2561 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2562 			force_clear_smbi = true;
   2563 		} else
   2564 			force_clear_smbi = false;
   2565 		break;
   2566 	case WM_T_82573:
   2567 	case WM_T_82574:
   2568 	case WM_T_82583:
   2569 		force_clear_smbi = true;
   2570 		break;
   2571 	default:
   2572 		force_clear_smbi = false;
   2573 		break;
   2574 	}
   2575 	if (force_clear_smbi) {
   2576 		reg = CSR_READ(sc, WMREG_SWSM);
   2577 		if ((reg & SWSM_SMBI) != 0)
   2578 			aprint_error_dev(sc->sc_dev,
   2579 			    "Please update the Bootagent\n");
   2580 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2581 	}
   2582 
   2583 	/*
    2584 	 * Defer printing the EEPROM type until after verifying the checksum.
   2585 	 * This allows the EEPROM type to be printed correctly in the case
   2586 	 * that no EEPROM is attached.
   2587 	 */
   2588 	/*
   2589 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2590 	 * this for later, so we can fail future reads from the EEPROM.
   2591 	 */
   2592 	if (wm_nvm_validate_checksum(sc)) {
   2593 		/*
    2594 		 * Check again, because some PCI-e parts fail the first
    2595 		 * check due to the link being in a sleep state.
   2596 		 */
   2597 		if (wm_nvm_validate_checksum(sc))
   2598 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2599 	}
   2600 
   2601 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2602 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2603 	else {
   2604 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2605 		    sc->sc_nvm_wordsize);
   2606 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2607 			aprint_verbose("iNVM");
   2608 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2609 			aprint_verbose("FLASH(HW)");
   2610 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2611 			aprint_verbose("FLASH");
   2612 		else {
   2613 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2614 				eetype = "SPI";
   2615 			else
   2616 				eetype = "MicroWire";
   2617 			aprint_verbose("(%d address bits) %s EEPROM",
   2618 			    sc->sc_nvm_addrbits, eetype);
   2619 		}
   2620 	}
   2621 	wm_nvm_version(sc);
   2622 	aprint_verbose("\n");
   2623 
   2624 	/*
    2625 	 * XXX The first call to wm_gmii_setup_phytype(). The result
    2626 	 * might be incorrect.
   2627 	 */
   2628 	wm_gmii_setup_phytype(sc, 0, 0);
   2629 
   2630 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2631 	switch (sc->sc_type) {
   2632 	case WM_T_ICH8:
   2633 	case WM_T_ICH9:
   2634 	case WM_T_ICH10:
   2635 	case WM_T_PCH:
   2636 	case WM_T_PCH2:
   2637 	case WM_T_PCH_LPT:
   2638 	case WM_T_PCH_SPT:
   2639 	case WM_T_PCH_CNP:
   2640 		apme_mask = WUC_APME;
   2641 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2642 		if ((eeprom_data & apme_mask) != 0)
   2643 			sc->sc_flags |= WM_F_WOL;
   2644 		break;
   2645 	default:
   2646 		break;
   2647 	}
   2648 
   2649 	/* Reset the chip to a known state. */
   2650 	wm_reset(sc);
   2651 
   2652 	/*
   2653 	 * Check for I21[01] PLL workaround.
   2654 	 *
   2655 	 * Three cases:
   2656 	 * a) Chip is I211.
   2657 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2658 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2659 	 */
   2660 	if (sc->sc_type == WM_T_I211)
   2661 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2662 	if (sc->sc_type == WM_T_I210) {
   2663 		if (!wm_nvm_flash_presence_i210(sc))
   2664 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2665 		else if ((sc->sc_nvm_ver_major < 3)
   2666 		    || ((sc->sc_nvm_ver_major == 3)
   2667 			&& (sc->sc_nvm_ver_minor < 25))) {
   2668 			aprint_verbose_dev(sc->sc_dev,
   2669 			    "ROM image version %d.%d is older than 3.25\n",
   2670 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2671 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2672 		}
   2673 	}
   2674 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2675 		wm_pll_workaround_i210(sc);
   2676 
   2677 	wm_get_wakeup(sc);
   2678 
   2679 	/* Non-AMT based hardware can now take control from firmware */
   2680 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2681 		wm_get_hw_control(sc);
   2682 
   2683 	/*
   2684 	 * Read the Ethernet address from the EEPROM, if not first found
   2685 	 * in device properties.
   2686 	 */
   2687 	ea = prop_dictionary_get(dict, "mac-address");
   2688 	if (ea != NULL) {
   2689 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2690 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2691 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2692 	} else {
   2693 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2694 			aprint_error_dev(sc->sc_dev,
   2695 			    "unable to read Ethernet address\n");
   2696 			goto out;
   2697 		}
   2698 	}
   2699 
   2700 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2701 	    ether_sprintf(enaddr));
   2702 
   2703 	/*
   2704 	 * Read the config info from the EEPROM, and set up various
   2705 	 * bits in the control registers based on their contents.
   2706 	 */
   2707 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2708 	if (pn != NULL) {
   2709 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2710 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2711 	} else {
   2712 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2713 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2714 			goto out;
   2715 		}
   2716 	}
   2717 
   2718 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2719 	if (pn != NULL) {
   2720 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2721 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2722 	} else {
   2723 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2724 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2725 			goto out;
   2726 		}
   2727 	}
   2728 
    2729 	/* Check for WM_F_WOL */
   2730 	switch (sc->sc_type) {
   2731 	case WM_T_82542_2_0:
   2732 	case WM_T_82542_2_1:
   2733 	case WM_T_82543:
   2734 		/* dummy? */
   2735 		eeprom_data = 0;
   2736 		apme_mask = NVM_CFG3_APME;
   2737 		break;
   2738 	case WM_T_82544:
   2739 		apme_mask = NVM_CFG2_82544_APM_EN;
   2740 		eeprom_data = cfg2;
   2741 		break;
   2742 	case WM_T_82546:
   2743 	case WM_T_82546_3:
   2744 	case WM_T_82571:
   2745 	case WM_T_82572:
   2746 	case WM_T_82573:
   2747 	case WM_T_82574:
   2748 	case WM_T_82583:
   2749 	case WM_T_80003:
   2750 	case WM_T_82575:
   2751 	case WM_T_82576:
   2752 		apme_mask = NVM_CFG3_APME;
   2753 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2754 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2755 		break;
   2756 	case WM_T_82580:
   2757 	case WM_T_I350:
   2758 	case WM_T_I354:
   2759 	case WM_T_I210:
   2760 	case WM_T_I211:
   2761 		apme_mask = NVM_CFG3_APME;
   2762 		wm_nvm_read(sc,
   2763 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2764 		    1, &eeprom_data);
   2765 		break;
   2766 	case WM_T_ICH8:
   2767 	case WM_T_ICH9:
   2768 	case WM_T_ICH10:
   2769 	case WM_T_PCH:
   2770 	case WM_T_PCH2:
   2771 	case WM_T_PCH_LPT:
   2772 	case WM_T_PCH_SPT:
   2773 	case WM_T_PCH_CNP:
    2774 		/* Already checked before wm_reset() */
   2775 		apme_mask = eeprom_data = 0;
   2776 		break;
   2777 	default: /* XXX 82540 */
   2778 		apme_mask = NVM_CFG3_APME;
   2779 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2780 		break;
   2781 	}
    2782 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2783 	if ((eeprom_data & apme_mask) != 0)
   2784 		sc->sc_flags |= WM_F_WOL;
   2785 
   2786 	/*
    2787 	 * We have the EEPROM settings; now apply the special cases
    2788 	 * where the EEPROM may be wrong or the board does not support
    2789 	 * wake-on-LAN on a particular port.
   2790 	 */
   2791 	switch (sc->sc_pcidevid) {
   2792 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2793 		sc->sc_flags &= ~WM_F_WOL;
   2794 		break;
   2795 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2796 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2797 		/* Wake events are only supported on port A for dual-fiber
    2798 		 * adapters, regardless of the EEPROM setting. */
   2799 		if (sc->sc_funcid == 1)
   2800 			sc->sc_flags &= ~WM_F_WOL;
   2801 		break;
   2802 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2803 		/* If quad port adapter, disable WoL on all but port A */
   2804 		if (sc->sc_funcid != 0)
   2805 			sc->sc_flags &= ~WM_F_WOL;
   2806 		break;
   2807 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2808 		/* Wake events are only supported on port A for dual-fiber
    2809 		 * adapters, regardless of the EEPROM setting. */
   2810 		if (sc->sc_funcid == 1)
   2811 			sc->sc_flags &= ~WM_F_WOL;
   2812 		break;
   2813 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2814 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2815 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2816 		/* If quad port adapter, disable WoL on all but port A */
   2817 		if (sc->sc_funcid != 0)
   2818 			sc->sc_flags &= ~WM_F_WOL;
   2819 		break;
   2820 	}
   2821 
   2822 	if (sc->sc_type >= WM_T_82575) {
   2823 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2824 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2825 			    nvmword);
   2826 			if ((sc->sc_type == WM_T_82575) ||
   2827 			    (sc->sc_type == WM_T_82576)) {
   2828 				/* Check NVM for autonegotiation */
   2829 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2830 				    != 0)
   2831 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2832 			}
   2833 			if ((sc->sc_type == WM_T_82575) ||
   2834 			    (sc->sc_type == WM_T_I350)) {
   2835 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2836 					sc->sc_flags |= WM_F_MAS;
   2837 			}
   2838 		}
   2839 	}
   2840 
   2841 	/*
    2842 	 * XXX Some multi-port cards need special handling to disable
    2843 	 * a particular port.
   2844 	 */
   2845 
   2846 	if (sc->sc_type >= WM_T_82544) {
   2847 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2848 		if (pn != NULL) {
   2849 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2850 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2851 		} else {
   2852 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2853 				aprint_error_dev(sc->sc_dev,
   2854 				    "unable to read SWDPIN\n");
   2855 				goto out;
   2856 			}
   2857 		}
   2858 	}
   2859 
   2860 	if (cfg1 & NVM_CFG1_ILOS)
   2861 		sc->sc_ctrl |= CTRL_ILOS;
   2862 
   2863 	/*
   2864 	 * XXX
    2865 	 * This code isn't correct because pins 2 and 3 are located at
    2866 	 * different positions on newer chips. Check all the datasheets.
    2867 	 *
    2868 	 * Until this is resolved, only apply it to chips <= 82580.
   2869 	 */
   2870 	if (sc->sc_type <= WM_T_82580) {
   2871 		if (sc->sc_type >= WM_T_82544) {
   2872 			sc->sc_ctrl |=
   2873 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2874 			    CTRL_SWDPIO_SHIFT;
   2875 			sc->sc_ctrl |=
   2876 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2877 			    CTRL_SWDPINS_SHIFT;
   2878 		} else {
   2879 			sc->sc_ctrl |=
   2880 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2881 			    CTRL_SWDPIO_SHIFT;
   2882 		}
   2883 	}
   2884 
   2885 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2886 		wm_nvm_read(sc,
   2887 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2888 		    1, &nvmword);
   2889 		if (nvmword & NVM_CFG3_ILOS)
   2890 			sc->sc_ctrl |= CTRL_ILOS;
   2891 	}
   2892 
   2893 #if 0
   2894 	if (sc->sc_type >= WM_T_82544) {
   2895 		if (cfg1 & NVM_CFG1_IPS0)
   2896 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2897 		if (cfg1 & NVM_CFG1_IPS1)
   2898 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2899 		sc->sc_ctrl_ext |=
   2900 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2901 		    CTRL_EXT_SWDPIO_SHIFT;
   2902 		sc->sc_ctrl_ext |=
   2903 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2904 		    CTRL_EXT_SWDPINS_SHIFT;
   2905 	} else {
   2906 		sc->sc_ctrl_ext |=
   2907 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2908 		    CTRL_EXT_SWDPIO_SHIFT;
   2909 	}
   2910 #endif
   2911 
   2912 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2913 #if 0
   2914 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2915 #endif
   2916 
   2917 	if (sc->sc_type == WM_T_PCH) {
   2918 		uint16_t val;
   2919 
   2920 		/* Save the NVM K1 bit setting */
   2921 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2922 
   2923 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2924 			sc->sc_nvm_k1_enabled = 1;
   2925 		else
   2926 			sc->sc_nvm_k1_enabled = 0;
   2927 	}
   2928 
   2929 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2930 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2931 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2932 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2933 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2934 	    || sc->sc_type == WM_T_82573
   2935 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2936 		/* Copper only */
   2937 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2938 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2939 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2940 	    || (sc->sc_type == WM_T_I211)) {
   2941 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2942 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
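		/*
		 * The CTRL_EXT link-mode field selects how the MAC connects
		 * to the PHY/SFP: internal GMII (copper), 1000BASE-KX,
		 * SGMII, or SERDES. SFP-based boards are probed further in
		 * the SGMII/SERDES cases below.
		 */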
   2943 		switch (link_mode) {
   2944 		case CTRL_EXT_LINK_MODE_1000KX:
   2945 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2946 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2947 			break;
   2948 		case CTRL_EXT_LINK_MODE_SGMII:
   2949 			if (wm_sgmii_uses_mdio(sc)) {
   2950 				aprint_normal_dev(sc->sc_dev,
   2951 				    "SGMII(MDIO)\n");
   2952 				sc->sc_flags |= WM_F_SGMII;
   2953 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2954 				break;
   2955 			}
   2956 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2957 			/*FALLTHROUGH*/
   2958 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2959 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2960 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2961 				if (link_mode
   2962 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2963 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2964 					sc->sc_flags |= WM_F_SGMII;
   2965 					aprint_verbose_dev(sc->sc_dev,
   2966 					    "SGMII\n");
   2967 				} else {
   2968 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2969 					aprint_verbose_dev(sc->sc_dev,
   2970 					    "SERDES\n");
   2971 				}
   2972 				break;
   2973 			}
   2974 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2975 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2976 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2977 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2978 				sc->sc_flags |= WM_F_SGMII;
   2979 			}
   2980 			/* Do not change link mode for 100BaseFX */
   2981 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2982 				break;
   2983 
   2984 			/* Change current link mode setting */
   2985 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2986 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2987 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2988 			else
   2989 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2990 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2991 			break;
   2992 		case CTRL_EXT_LINK_MODE_GMII:
   2993 		default:
   2994 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2995 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2996 			break;
   2997 		}
   2998 
    2999 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    3000 			reg |= CTRL_EXT_I2C_ENA;
    3001 		else
    3002 			reg &= ~CTRL_EXT_I2C_ENA;
   3004 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3005 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3006 			if (!wm_sgmii_uses_mdio(sc))
   3007 				wm_gmii_setup_phytype(sc, 0, 0);
   3008 			wm_reset_mdicnfg_82580(sc);
   3009 		}
   3010 	} else if (sc->sc_type < WM_T_82543 ||
   3011 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3012 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3013 			aprint_error_dev(sc->sc_dev,
   3014 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3015 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3016 		}
   3017 	} else {
   3018 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3019 			aprint_error_dev(sc->sc_dev,
   3020 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3021 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3022 		}
   3023 	}
   3024 
   3025 	if (sc->sc_type >= WM_T_PCH2)
   3026 		sc->sc_flags |= WM_F_EEE;
   3027 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3028 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3029 		/* XXX: Need special handling for I354. (not yet) */
   3030 		if (sc->sc_type != WM_T_I354)
   3031 			sc->sc_flags |= WM_F_EEE;
   3032 	}
   3033 
   3034 	/*
   3035 	 * The I350 has a bug where it always strips the CRC whether
   3036 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   3037 	 */
   3038 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3039 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3040 		sc->sc_flags |= WM_F_CRC_STRIP;
   3041 
   3042 	/* Set device properties (macflags) */
   3043 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3044 
   3045 	if (sc->sc_flags != 0) {
   3046 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3047 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3048 	}
   3049 
   3050 #ifdef WM_MPSAFE
   3051 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3052 #else
   3053 	sc->sc_core_lock = NULL;
   3054 #endif
   3055 
   3056 	/* Initialize the media structures accordingly. */
   3057 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3058 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3059 	else
   3060 		wm_tbi_mediainit(sc); /* All others */
   3061 
   3062 	ifp = &sc->sc_ethercom.ec_if;
   3063 	xname = device_xname(sc->sc_dev);
   3064 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3065 	ifp->if_softc = sc;
   3066 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3067 #ifdef WM_MPSAFE
   3068 	ifp->if_extflags = IFEF_MPSAFE;
   3069 #endif
   3070 	ifp->if_ioctl = wm_ioctl;
   3071 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3072 		ifp->if_start = wm_nq_start;
   3073 		/*
    3074 		 * When there is only one CPU and the controller can use
    3075 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3076 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3077 		 * the other for link status changes. In this situation,
    3078 		 * wm_nq_transmit() is disadvantageous because of the
    3079 		 * wm_select_txqueue() and pcq(9) overhead.
   3080 		 */
   3081 		if (wm_is_using_multiqueue(sc))
   3082 			ifp->if_transmit = wm_nq_transmit;
   3083 	} else {
   3084 		ifp->if_start = wm_start;
   3085 		/*
   3086 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3087 		 * described above.
   3088 		 */
   3089 		if (wm_is_using_multiqueue(sc))
   3090 			ifp->if_transmit = wm_transmit;
   3091 	}
    3092 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   3093 	ifp->if_init = wm_init;
   3094 	ifp->if_stop = wm_stop;
   3095 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3096 	IFQ_SET_READY(&ifp->if_snd);
   3097 
   3098 	/* Check for jumbo frame */
   3099 	switch (sc->sc_type) {
   3100 	case WM_T_82573:
   3101 		/* XXX limited to 9234 if ASPM is disabled */
   3102 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3103 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3104 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3105 		break;
   3106 	case WM_T_82571:
   3107 	case WM_T_82572:
   3108 	case WM_T_82574:
   3109 	case WM_T_82583:
   3110 	case WM_T_82575:
   3111 	case WM_T_82576:
   3112 	case WM_T_82580:
   3113 	case WM_T_I350:
   3114 	case WM_T_I354:
   3115 	case WM_T_I210:
   3116 	case WM_T_I211:
   3117 	case WM_T_80003:
   3118 	case WM_T_ICH9:
   3119 	case WM_T_ICH10:
   3120 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3121 	case WM_T_PCH_LPT:
   3122 	case WM_T_PCH_SPT:
   3123 	case WM_T_PCH_CNP:
   3124 		/* XXX limited to 9234 */
   3125 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3126 		break;
   3127 	case WM_T_PCH:
   3128 		/* XXX limited to 4096 */
   3129 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3130 		break;
   3131 	case WM_T_82542_2_0:
   3132 	case WM_T_82542_2_1:
   3133 	case WM_T_ICH8:
   3134 		/* No support for jumbo frame */
   3135 		break;
   3136 	default:
   3137 		/* ETHER_MAX_LEN_JUMBO */
   3138 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3139 		break;
   3140 	}
   3141 
    3142 	/* If we're an i82543 or greater, we can support VLANs. */
   3143 	if (sc->sc_type >= WM_T_82543) {
   3144 		sc->sc_ethercom.ec_capabilities |=
   3145 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3146 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3147 	}
   3148 
   3149 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3150 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3151 
   3152 	/*
    3153 	 * We can offload IPv4/TCPv4/UDPv4 checksums in both directions
    3154 	 * (plus TCPv6/UDPv6 Tx checksums). Only on i82543 and later.
   3155 	 */
   3156 	if (sc->sc_type >= WM_T_82543) {
   3157 		ifp->if_capabilities |=
   3158 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3159 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3160 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3161 		    IFCAP_CSUM_TCPv6_Tx |
   3162 		    IFCAP_CSUM_UDPv6_Tx;
   3163 	}
   3164 
   3165 	/*
   3166 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3167 	 *
   3168 	 *	82541GI (8086:1076) ... no
   3169 	 *	82572EI (8086:10b9) ... yes
   3170 	 */
   3171 	if (sc->sc_type >= WM_T_82571) {
   3172 		ifp->if_capabilities |=
   3173 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3174 	}
   3175 
   3176 	/*
    3177 	 * If we're an i82544 or greater (except the i82547), we can do
   3178 	 * TCP segmentation offload.
   3179 	 */
   3180 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3181 		ifp->if_capabilities |= IFCAP_TSOv4;
   3182 
   3183 	if (sc->sc_type >= WM_T_82571)
   3184 		ifp->if_capabilities |= IFCAP_TSOv6;
   3185 
   3186 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3187 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3188 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3189 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3190 
   3191 	/* Attach the interface. */
   3192 	if_initialize(ifp);
   3193 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3194 	ether_ifattach(ifp, enaddr);
   3195 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3196 	if_register(ifp);
   3197 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3198 	    RND_FLAG_DEFAULT);
   3199 
   3200 #ifdef WM_EVENT_COUNTERS
   3201 	/* Attach event counters. */
   3202 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3203 	    NULL, xname, "linkintr");
   3204 
   3205 	if (sc->sc_type >= WM_T_82542_2_1) {
   3206 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3207 		    NULL, xname, "tx_xoff");
   3208 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3209 		    NULL, xname, "tx_xon");
   3210 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3211 		    NULL, xname, "rx_xoff");
   3212 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3213 		    NULL, xname, "rx_xon");
   3214 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3215 		    NULL, xname, "rx_macctl");
   3216 	}
   3217 
   3218 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3219 	    NULL, xname, "CRC Error");
   3220 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3221 	    NULL, xname, "Symbol Error");
   3222 
   3223 	if (sc->sc_type >= WM_T_82543) {
   3224 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3225 		    NULL, xname, "Alignment Error");
   3226 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3227 		    NULL, xname, "Receive Error");
   3228 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
   3229 		    NULL, xname, "Carrier Extension Error");
   3230 	}
   3231 
   3232 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3233 	    NULL, xname, "Missed Packets");
   3234 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3235 	    NULL, xname, "Collision");
   3236 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3237 	    NULL, xname, "Sequence Error");
   3238 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3239 	    NULL, xname, "Receive Length Error");
   3240 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3241 	    NULL, xname, "Single Collision");
   3242 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3243 	    NULL, xname, "Excessive Collisions");
   3244 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3245 	    NULL, xname, "Multiple Collision");
   3246 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3247 	    NULL, xname, "Late Collisions");
   3248 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3249 	    NULL, xname, "Defer");
   3250 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3251 	    NULL, xname, "Good Packets Rx");
   3252 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3253 	    NULL, xname, "Broadcast Packets Rx");
   3254 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3255 	    NULL, xname, "Multicast Packets Rx");
   3256 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3257 	    NULL, xname, "Good Packets Tx");
   3258 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3259 	    NULL, xname, "Good Octets Rx");
   3260 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3261 	    NULL, xname, "Good Octets Tx");
   3262 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3263 	    NULL, xname, "Rx No Buffers");
   3264 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3265 	    NULL, xname, "Rx Undersize");
   3266 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3267 	    NULL, xname, "Rx Fragment");
   3268 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3269 	    NULL, xname, "Rx Oversize");
   3270 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3271 	    NULL, xname, "Rx Jabber");
   3272 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3273 	    NULL, xname, "Total Octets Rx");
   3274 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3275 	    NULL, xname, "Total Octets Tx");
   3276 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3277 	    NULL, xname, "Total Packets Rx");
   3278 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3279 	    NULL, xname, "Total Packets Tx");
   3280 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3281 	    NULL, xname, "Multicast Packets Tx");
   3282 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3283 	    NULL, xname, "Broadcast Packets Tx Count");
   3284 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3285 	    NULL, xname, "Packets Rx (64 bytes)");
   3286 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3287 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3288 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3289 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3290 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
   3291 	    NULL, xname, "Packets Rx (255-511 bytes)");
   3292 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3293 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3294 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3295 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3296 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3297 	    NULL, xname, "Packets Tx (64 bytes)");
   3298 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3299 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3300 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3301 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3302 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3303 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3304 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3305 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3306 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
   3307 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
   3308 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3309 	    NULL, xname, "Interrupt Assertion");
   3310 	evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3311 	    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3312 	evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3313 	    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3314 	evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3315 	    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3316 	evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3317 	    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3318 	evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3319 	    NULL, xname, "Intr. Cause Tx Queue Empty");
   3320 	evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3321 	    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3322 	evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
   3323 	    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3324 	evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3325 	    NULL, xname, "Interrupt Cause Receiver Overrun");
   3326 	if (sc->sc_type >= WM_T_82543) {
   3327 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3328 		    NULL, xname, "Tx with No CRS");
   3329 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3330 		    NULL, xname, "TCP Segmentation Context Tx");
   3331 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
   3332 		    NULL, xname, "TCP Segmentation Context Tx Fail");
   3333 	}
   3334 	if (sc->sc_type >= WM_T_82540) {
   3335 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3336 		    NULL, xname, "Management Packets RX");
   3337 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3338 		    NULL, xname, "Management Packets Dropped");
   3339 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3340 		    NULL, xname, "Management Packets TX");
   3341 	}
   3342 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3343 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3344 		    NULL, xname, "BMC2OS Packets received by host");
   3345 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3346 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3347 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3348 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3349 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3350 		    NULL, xname, "OS2BMC Packets received by BMC");
   3351 	}
   3352 #endif /* WM_EVENT_COUNTERS */
   3353 
   3354 	sc->sc_txrx_use_workqueue = false;
   3355 
   3356 	if (wm_phy_need_linkdown_discard(sc)) {
   3357 		DPRINTF(sc, WM_DEBUG_LINK,
   3358 		    ("%s: %s: Set linkdown discard flag\n",
   3359 			device_xname(sc->sc_dev), __func__));
   3360 		wm_set_linkdown_discard(sc);
   3361 	}
   3362 
   3363 	wm_init_sysctls(sc);
   3364 
   3365 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3366 		pmf_class_network_register(self, ifp);
   3367 	else
   3368 		aprint_error_dev(self, "couldn't establish power handler\n");
   3369 
   3370 	sc->sc_flags |= WM_F_ATTACHED;
   3371 out:
   3372 	return;
   3373 }
   3374 
   3375 /* The detach function (ca_detach) */
   3376 static int
   3377 wm_detach(device_t self, int flags __unused)
   3378 {
   3379 	struct wm_softc *sc = device_private(self);
   3380 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3381 	int i;
   3382 
   3383 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3384 		return 0;
   3385 
   3386 	/* Stop the interface. Callouts are stopped in it. */
   3387 	wm_stop(ifp, 1);
   3388 
   3389 	pmf_device_deregister(self);
   3390 
   3391 	sysctl_teardown(&sc->sc_sysctllog);
   3392 
   3393 #ifdef WM_EVENT_COUNTERS
   3394 	evcnt_detach(&sc->sc_ev_linkintr);
   3395 
   3396 	if (sc->sc_type >= WM_T_82542_2_1) {
   3397 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3398 		evcnt_detach(&sc->sc_ev_tx_xon);
   3399 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3400 		evcnt_detach(&sc->sc_ev_rx_xon);
   3401 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3402 	}
   3403 
   3404 	evcnt_detach(&sc->sc_ev_crcerrs);
   3405 	evcnt_detach(&sc->sc_ev_symerrc);
   3406 
   3407 	if (sc->sc_type >= WM_T_82543) {
   3408 		evcnt_detach(&sc->sc_ev_algnerrc);
   3409 		evcnt_detach(&sc->sc_ev_rxerrc);
   3410 		evcnt_detach(&sc->sc_ev_cexterr);
   3411 	}
   3412 	evcnt_detach(&sc->sc_ev_mpc);
   3413 	evcnt_detach(&sc->sc_ev_colc);
   3414 	evcnt_detach(&sc->sc_ev_sec);
   3415 	evcnt_detach(&sc->sc_ev_rlec);
   3416 	evcnt_detach(&sc->sc_ev_scc);
   3417 	evcnt_detach(&sc->sc_ev_ecol);
   3418 	evcnt_detach(&sc->sc_ev_mcc);
   3419 	evcnt_detach(&sc->sc_ev_latecol);
   3420 	evcnt_detach(&sc->sc_ev_dc);
   3421 	evcnt_detach(&sc->sc_ev_gprc);
   3422 	evcnt_detach(&sc->sc_ev_bprc);
   3423 	evcnt_detach(&sc->sc_ev_mprc);
   3424 	evcnt_detach(&sc->sc_ev_gptc);
   3425 	evcnt_detach(&sc->sc_ev_gorc);
   3426 	evcnt_detach(&sc->sc_ev_gotc);
   3427 	evcnt_detach(&sc->sc_ev_rnbc);
   3428 	evcnt_detach(&sc->sc_ev_ruc);
   3429 	evcnt_detach(&sc->sc_ev_rfc);
   3430 	evcnt_detach(&sc->sc_ev_roc);
   3431 	evcnt_detach(&sc->sc_ev_rjc);
   3432 	evcnt_detach(&sc->sc_ev_tor);
   3433 	evcnt_detach(&sc->sc_ev_tot);
   3434 	evcnt_detach(&sc->sc_ev_tpr);
   3435 	evcnt_detach(&sc->sc_ev_tpt);
   3436 	evcnt_detach(&sc->sc_ev_mptc);
   3437 	evcnt_detach(&sc->sc_ev_bptc);
   3438 	evcnt_detach(&sc->sc_ev_prc64);
   3439 	evcnt_detach(&sc->sc_ev_prc127);
   3440 	evcnt_detach(&sc->sc_ev_prc255);
   3441 	evcnt_detach(&sc->sc_ev_prc511);
   3442 	evcnt_detach(&sc->sc_ev_prc1023);
   3443 	evcnt_detach(&sc->sc_ev_prc1522);
   3444 	evcnt_detach(&sc->sc_ev_ptc64);
   3445 	evcnt_detach(&sc->sc_ev_ptc127);
   3446 	evcnt_detach(&sc->sc_ev_ptc255);
   3447 	evcnt_detach(&sc->sc_ev_ptc511);
   3448 	evcnt_detach(&sc->sc_ev_ptc1023);
   3449 	evcnt_detach(&sc->sc_ev_ptc1522);
   3450 	evcnt_detach(&sc->sc_ev_iac);
   3451 	evcnt_detach(&sc->sc_ev_icrxptc);
   3452 	evcnt_detach(&sc->sc_ev_icrxatc);
   3453 	evcnt_detach(&sc->sc_ev_ictxptc);
   3454 	evcnt_detach(&sc->sc_ev_ictxact);
   3455 	evcnt_detach(&sc->sc_ev_ictxqec);
   3456 	evcnt_detach(&sc->sc_ev_ictxqmtc);
   3457 	evcnt_detach(&sc->sc_ev_icrxdmtc);
   3458 	evcnt_detach(&sc->sc_ev_icrxoc);
   3459 	if (sc->sc_type >= WM_T_82543) {
   3460 		evcnt_detach(&sc->sc_ev_tncrs);
   3461 		evcnt_detach(&sc->sc_ev_tsctc);
   3462 		evcnt_detach(&sc->sc_ev_tsctfc);
   3463 	}
   3464 	if (sc->sc_type >= WM_T_82540) {
   3465 		evcnt_detach(&sc->sc_ev_mgtprc);
   3466 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3467 		evcnt_detach(&sc->sc_ev_mgtptc);
   3468 	}
   3469 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3470 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3471 		evcnt_detach(&sc->sc_ev_o2bspc);
   3472 		evcnt_detach(&sc->sc_ev_b2ospc);
   3473 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3474 	}
   3475 #endif /* WM_EVENT_COUNTERS */
   3476 
   3477 	rnd_detach_source(&sc->rnd_source);
   3478 
   3479 	/* Tell the firmware about the release */
   3480 	WM_CORE_LOCK(sc);
   3481 	wm_release_manageability(sc);
   3482 	wm_release_hw_control(sc);
   3483 	wm_enable_wakeup(sc);
   3484 	WM_CORE_UNLOCK(sc);
   3485 
   3486 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3487 
   3488 	ether_ifdetach(ifp);
   3489 	if_detach(ifp);
   3490 	if_percpuq_destroy(sc->sc_ipq);
   3491 
   3492 	/* Delete all remaining media. */
   3493 	ifmedia_fini(&sc->sc_mii.mii_media);
   3494 
   3495 	/* Unload RX dmamaps and free mbufs */
   3496 	for (i = 0; i < sc->sc_nqueues; i++) {
   3497 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3498 		mutex_enter(rxq->rxq_lock);
   3499 		wm_rxdrain(rxq);
   3500 		mutex_exit(rxq->rxq_lock);
   3501 	}
   3502 	/* Must unlock here */
   3503 
   3504 	/* Disestablish the interrupt handler */
   3505 	for (i = 0; i < sc->sc_nintrs; i++) {
   3506 		if (sc->sc_ihs[i] != NULL) {
   3507 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3508 			sc->sc_ihs[i] = NULL;
   3509 		}
   3510 	}
   3511 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3512 
   3513 	/* wm_stop() ensured that the workqueue is stopped. */
   3514 	workqueue_destroy(sc->sc_queue_wq);
   3515 
   3516 	for (i = 0; i < sc->sc_nqueues; i++)
   3517 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3518 
   3519 	wm_free_txrx_queues(sc);
   3520 
   3521 	/* Unmap the registers */
   3522 	if (sc->sc_ss) {
   3523 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3524 		sc->sc_ss = 0;
   3525 	}
   3526 	if (sc->sc_ios) {
   3527 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3528 		sc->sc_ios = 0;
   3529 	}
   3530 	if (sc->sc_flashs) {
   3531 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3532 		sc->sc_flashs = 0;
   3533 	}
   3534 
   3535 	if (sc->sc_core_lock)
   3536 		mutex_obj_free(sc->sc_core_lock);
   3537 	if (sc->sc_ich_phymtx)
   3538 		mutex_obj_free(sc->sc_ich_phymtx);
   3539 	if (sc->sc_ich_nvmmtx)
   3540 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3541 
   3542 	return 0;
   3543 }
   3544 
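/*
 * wm_suspend:
 *
 *	PMF suspend handler. Hand the hardware back to the firmware and
 *	arm the configured wakeup events.
 */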
   3545 static bool
   3546 wm_suspend(device_t self, const pmf_qual_t *qual)
   3547 {
   3548 	struct wm_softc *sc = device_private(self);
   3549 
   3550 	wm_release_manageability(sc);
   3551 	wm_release_hw_control(sc);
   3552 	wm_enable_wakeup(sc);
   3553 
   3554 	return true;
   3555 }
   3556 
   3557 static bool
   3558 wm_resume(device_t self, const pmf_qual_t *qual)
   3559 {
   3560 	struct wm_softc *sc = device_private(self);
   3561 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3562 	pcireg_t reg;
   3563 	char buf[256];
   3564 
   3565 	reg = CSR_READ(sc, WMREG_WUS);
   3566 	if (reg != 0) {
   3567 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3568 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3569 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3570 	}
   3571 
   3572 	if (sc->sc_type >= WM_T_PCH2)
   3573 		wm_resume_workarounds_pchlan(sc);
   3574 	if ((ifp->if_flags & IFF_UP) == 0) {
   3575 		/* >= PCH_SPT hardware workaround before reset. */
   3576 		if (sc->sc_type >= WM_T_PCH_SPT)
   3577 			wm_flush_desc_rings(sc);
   3578 
   3579 		wm_reset(sc);
   3580 		/* Non-AMT based hardware can now take control from firmware */
   3581 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3582 			wm_get_hw_control(sc);
   3583 		wm_init_manageability(sc);
   3584 	} else {
   3585 		/*
   3586 		 * We called pmf_class_network_register(), so if_init() is
   3587 		 * automatically called when IFF_UP. wm_reset(),
   3588 		 * wm_get_hw_control() and wm_init_manageability() are called
   3589 		 * via wm_init().
   3590 		 */
   3591 	}
   3592 
   3593 	return true;
   3594 }
   3595 
   3596 /*
   3597  * wm_watchdog:		[ifnet interface function]
   3598  *
   3599  *	Watchdog timer handler.
   3600  */
   3601 static void
   3602 wm_watchdog(struct ifnet *ifp)
   3603 {
   3604 	int qid;
   3605 	struct wm_softc *sc = ifp->if_softc;
    3606 	uint16_t hang_queue = 0; /* One bit per queue; max 16 queues (82576) */
   3607 
   3608 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3609 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3610 
   3611 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3612 	}
   3613 
    3614 	/* If any of the queues hung up, reset the interface. */
   3615 	if (hang_queue != 0) {
   3616 		(void)wm_init(ifp);
   3617 
   3618 		/*
    3619 		 * There is still some upper-layer processing that calls
    3620 		 * ifp->if_start() directly, e.g. ALTQ or a single-CPU system.
   3621 		 */
   3622 		/* Try to get more packets going. */
   3623 		ifp->if_start(ifp);
   3624 	}
   3625 }
   3626 
   3627 
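/*
 * wm_watchdog_txq:
 *
 *	Per-queue watchdog helper. If the queue has been trying to send
 *	longer than wm_watchdog_timeout, inspect it with the queue lock held.
 */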
   3628 static void
   3629 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3630 {
   3631 
   3632 	mutex_enter(txq->txq_lock);
   3633 	if (txq->txq_sending &&
   3634 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3635 		wm_watchdog_txq_locked(ifp, txq, hang);
   3636 
   3637 	mutex_exit(txq->txq_lock);
   3638 }
   3639 
   3640 static void
   3641 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3642     uint16_t *hang)
   3643 {
   3644 	struct wm_softc *sc = ifp->if_softc;
   3645 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3646 
   3647 	KASSERT(mutex_owned(txq->txq_lock));
   3648 
   3649 	/*
   3650 	 * Since we're using delayed interrupts, sweep up
   3651 	 * before we report an error.
   3652 	 */
   3653 	wm_txeof(txq, UINT_MAX);
   3654 
   3655 	if (txq->txq_sending)
   3656 		*hang |= __BIT(wmq->wmq_id);
   3657 
   3658 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3659 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3660 		    device_xname(sc->sc_dev));
   3661 	} else {
   3662 #ifdef WM_DEBUG
   3663 		int i, j;
   3664 		struct wm_txsoft *txs;
   3665 #endif
   3666 		log(LOG_ERR,
   3667 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3668 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3669 		    txq->txq_next);
   3670 		if_statinc(ifp, if_oerrors);
   3671 #ifdef WM_DEBUG
   3672 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3673 		    i = WM_NEXTTXS(txq, i)) {
   3674 			txs = &txq->txq_soft[i];
   3675 			printf("txs %d tx %d -> %d\n",
   3676 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3677 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3678 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3679 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3680 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3681 					printf("\t %#08x%08x\n",
   3682 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3683 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3684 				} else {
   3685 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3686 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3687 					    txq->txq_descs[j].wtx_addr.wa_low);
   3688 					printf("\t %#04x%02x%02x%08x\n",
   3689 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3690 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3691 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3692 					    txq->txq_descs[j].wtx_cmdlen);
   3693 				}
   3694 				if (j == txs->txs_lastdesc)
   3695 					break;
   3696 			}
   3697 		}
   3698 #endif
   3699 	}
   3700 }
   3701 
   3702 /*
   3703  * wm_tick:
   3704  *
   3705  *	One second timer, used to check link status, sweep up
   3706  *	completed transmit jobs, etc.
   3707  */
   3708 static void
   3709 wm_tick(void *arg)
   3710 {
   3711 	struct wm_softc *sc = arg;
   3712 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    3713 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
   3714 	    cexterr;
   3715 #ifndef WM_MPSAFE
   3716 	int s = splnet();
   3717 #endif
   3718 
   3719 	WM_CORE_LOCK(sc);
   3720 
   3721 	if (sc->sc_core_stopping) {
   3722 		WM_CORE_UNLOCK(sc);
   3723 #ifndef WM_MPSAFE
   3724 		splx(s);
   3725 #endif
   3726 		return;
   3727 	}
   3728 
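	/*
	 * The statistics registers read below are clear-on-read, so each
	 * read returns the delta accumulated since the previous tick.
	 */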
   3729 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3730 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3731 	mpc = CSR_READ(sc, WMREG_MPC);
   3732 	colc = CSR_READ(sc, WMREG_COLC);
   3733 	sec = CSR_READ(sc, WMREG_SEC);
   3734 	rlec = CSR_READ(sc, WMREG_RLEC);
   3735 
   3736 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3737 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3738 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3739 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3740 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3741 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3742 
   3743 	if (sc->sc_type >= WM_T_82542_2_1) {
   3744 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3745 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3746 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3747 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3748 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3749 	}
   3750 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3751 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3752 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3753 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3754 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3755 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3756 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3757 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3758 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3759 
   3760 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   3761 	    CSR_READ(sc, WMREG_GORCL) + CSR_READ(sc, WMREG_GORCH));
   3762 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   3763 	    CSR_READ(sc, WMREG_GOTCL) + CSR_READ(sc, WMREG_GOTCH));
   3764 
   3765 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3766 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3767 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3768 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3769 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3770 
   3771 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   3772 	    CSR_READ(sc, WMREG_TORL) + CSR_READ(sc, WMREG_TORH));
   3773 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   3774 	    CSR_READ(sc, WMREG_TOTL) + CSR_READ(sc, WMREG_TOTH));
   3775 
   3776 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3777 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3778 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   3779 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   3780 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3781 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3782 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3783 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3784 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3785 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3786 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   3787 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   3788 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   3789 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   3790 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   3791 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   3792 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   3793 	WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   3794 	WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   3795 	WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   3796 	WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   3797 	WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   3798 	WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
   3799 	WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
   3800 	WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   3801 
   3802 	if (sc->sc_type >= WM_T_82543) {
   3803 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3804 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3805 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3806 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3807 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3808 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3809 
   3810 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3811 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3812 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
   3813 	} else
   3814 		algnerrc = rxerrc = cexterr = 0;
   3815 
   3816 	if (sc->sc_type >= WM_T_82540) {
   3817 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3818 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3819 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3820 	}
   3821 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
   3822 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
   3823 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
   3824 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
   3825 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
   3826 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
   3827 	}
   3828 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3829 	if_statadd_ref(nsr, if_collisions, colc);
   3830 	if_statadd_ref(nsr, if_ierrors,
   3831 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   3832 	/*
    3833 	 * WMREG_RNBC is incremented when no receive buffers are available
    3834 	 * in host memory. It is not the number of dropped packets, because
    3835 	 * the controller can still receive packets in that case as long as
    3836 	 * there is space in the PHY's FIFO.
    3837 	 *
    3838 	 * To count WMREG_RNBC itself, use a dedicated EVCNT instead of
    3839 	 * if_iqdrops.
   3840 	 */
   3841 	if_statadd_ref(nsr, if_iqdrops, mpc);
   3842 	IF_STAT_PUTREF(ifp);
   3843 
   3844 	if (sc->sc_flags & WM_F_HAS_MII)
   3845 		mii_tick(&sc->sc_mii);
   3846 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3847 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3848 		wm_serdes_tick(sc);
   3849 	else
   3850 		wm_tbi_tick(sc);
   3851 
   3852 	WM_CORE_UNLOCK(sc);
   3853 #ifndef WM_MPSAFE
   3854 	splx(s);
   3855 #endif
   3856 
   3857 	wm_watchdog(ifp);
   3858 
   3859 	callout_schedule(&sc->sc_tick_ch, hz);
   3860 }
   3861 
   3862 static int
   3863 wm_ifflags_cb(struct ethercom *ec)
   3864 {
   3865 	struct ifnet *ifp = &ec->ec_if;
   3866 	struct wm_softc *sc = ifp->if_softc;
   3867 	u_short iffchange;
   3868 	int ecchange;
   3869 	bool needreset = false;
   3870 	int rc = 0;
   3871 
   3872 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3873 		device_xname(sc->sc_dev), __func__));
   3874 
   3875 	KASSERT(IFNET_LOCKED(ifp));
   3876 	WM_CORE_LOCK(sc);
   3877 
   3878 	/*
   3879 	 * Check for if_flags.
   3880 	 * Main usage is to prevent linkdown when opening bpf.
   3881 	 */
   3882 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3883 	sc->sc_if_flags = ifp->if_flags;
   3884 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3885 		needreset = true;
   3886 		goto ec;
   3887 	}
   3888 
   3889 	/* iff related updates */
   3890 	if ((iffchange & IFF_PROMISC) != 0)
   3891 		wm_set_filter(sc);
   3892 
   3893 	wm_set_vlan(sc);
   3894 
   3895 ec:
   3896 	/* Check for ec_capenable. */
   3897 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3898 	sc->sc_ec_capenable = ec->ec_capenable;
   3899 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3900 		needreset = true;
   3901 		goto out;
   3902 	}
   3903 
   3904 	/* ec related updates */
   3905 	wm_set_eee(sc);
   3906 
   3907 out:
   3908 	if (needreset)
   3909 		rc = ENETRESET;
   3910 	WM_CORE_UNLOCK(sc);
   3911 
   3912 	return rc;
   3913 }
   3914 
   3915 static bool
   3916 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3917 {
   3918 
   3919 	switch (sc->sc_phytype) {
   3920 	case WMPHY_82577: /* ihphy */
   3921 	case WMPHY_82578: /* atphy */
   3922 	case WMPHY_82579: /* ihphy */
   3923 	case WMPHY_I217: /* ihphy */
   3924 	case WMPHY_82580: /* ihphy */
   3925 	case WMPHY_I350: /* ihphy */
   3926 		return true;
   3927 	default:
   3928 		return false;
   3929 	}
   3930 }
   3931 
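/*
 * Mark every Tx queue so that packets are discarded while the link is
 * down; used for the PHYs listed in wm_phy_need_linkdown_discard().
 */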
   3932 static void
   3933 wm_set_linkdown_discard(struct wm_softc *sc)
   3934 {
   3935 
   3936 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3937 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3938 
   3939 		mutex_enter(txq->txq_lock);
   3940 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3941 		mutex_exit(txq->txq_lock);
   3942 	}
   3943 }
   3944 
   3945 static void
   3946 wm_clear_linkdown_discard(struct wm_softc *sc)
   3947 {
   3948 
   3949 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3950 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3951 
   3952 		mutex_enter(txq->txq_lock);
   3953 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3954 		mutex_exit(txq->txq_lock);
   3955 	}
   3956 }
   3957 
   3958 /*
   3959  * wm_ioctl:		[ifnet interface function]
   3960  *
   3961  *	Handle control requests from the operator.
   3962  */
   3963 static int
   3964 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3965 {
   3966 	struct wm_softc *sc = ifp->if_softc;
   3967 	struct ifreq *ifr = (struct ifreq *)data;
   3968 	struct ifaddr *ifa = (struct ifaddr *)data;
   3969 	struct sockaddr_dl *sdl;
   3970 	int error;
   3971 
   3972 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3973 		device_xname(sc->sc_dev), __func__));
   3974 
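	/*
	 * SIOCADDMULTI/SIOCDELMULTI may be issued without the ifnet lock;
	 * every other request must hold it (asserted below).
	 */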
   3975 	switch (cmd) {
   3976 	case SIOCADDMULTI:
   3977 	case SIOCDELMULTI:
   3978 		break;
   3979 	default:
   3980 		KASSERT(IFNET_LOCKED(ifp));
   3981 	}
   3982 
   3983 #ifndef WM_MPSAFE
   3984 	const int s = splnet();
   3985 #endif
   3986 	switch (cmd) {
   3987 	case SIOCSIFMEDIA:
   3988 		WM_CORE_LOCK(sc);
   3989 		/* Flow control requires full-duplex mode. */
   3990 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3991 		    (ifr->ifr_media & IFM_FDX) == 0)
   3992 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3993 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3994 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3995 				/* We can do both TXPAUSE and RXPAUSE. */
   3996 				ifr->ifr_media |=
   3997 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3998 			}
   3999 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4000 		}
   4001 		WM_CORE_UNLOCK(sc);
   4002 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4003 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4004 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4005 				DPRINTF(sc, WM_DEBUG_LINK,
   4006 				    ("%s: %s: Set linkdown discard flag\n",
   4007 					device_xname(sc->sc_dev), __func__));
   4008 				wm_set_linkdown_discard(sc);
   4009 			}
   4010 		}
   4011 		break;
   4012 	case SIOCINITIFADDR:
   4013 		WM_CORE_LOCK(sc);
   4014 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4015 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4016 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4017 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4018 			/* Unicast address is the first multicast entry */
   4019 			wm_set_filter(sc);
   4020 			error = 0;
   4021 			WM_CORE_UNLOCK(sc);
   4022 			break;
   4023 		}
   4024 		WM_CORE_UNLOCK(sc);
   4025 		/*FALLTHROUGH*/
   4026 	default:
   4027 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4028 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4029 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4030 				DPRINTF(sc, WM_DEBUG_LINK,
   4031 				    ("%s: %s: Set linkdown discard flag\n",
   4032 					device_xname(sc->sc_dev), __func__));
   4033 				wm_set_linkdown_discard(sc);
   4034 			}
   4035 		}
   4036 #ifdef WM_MPSAFE
   4037 		const int s = splnet();
   4038 #endif
   4039 		/* It may call wm_start, so unlock here */
   4040 		error = ether_ioctl(ifp, cmd, data);
   4041 #ifdef WM_MPSAFE
   4042 		splx(s);
   4043 #endif
   4044 		if (error != ENETRESET)
   4045 			break;
   4046 
   4047 		error = 0;
   4048 
   4049 		if (cmd == SIOCSIFCAP)
   4050 			error = if_init(ifp);
   4051 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4052 			WM_CORE_LOCK(sc);
   4053 			if (sc->sc_if_flags & IFF_RUNNING) {
   4054 				/*
   4055 				 * Multicast list has changed; set the hardware filter
   4056 				 * accordingly.
   4057 				 */
   4058 				wm_set_filter(sc);
   4059 			}
   4060 			WM_CORE_UNLOCK(sc);
   4061 		}
   4062 		break;
   4063 	}
   4064 
   4065 #ifndef WM_MPSAFE
   4066 	splx(s);
   4067 #endif
   4068 	return error;
   4069 }
   4070 
   4071 /* MAC address related */
   4072 
   4073 /*
    4074  * Get the offset of the MAC address and return it.
    4075  * If an error occurs, offset 0 is used.
   4076  */
   4077 static uint16_t
   4078 wm_check_alt_mac_addr(struct wm_softc *sc)
   4079 {
   4080 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4081 	uint16_t offset = NVM_OFF_MACADDR;
   4082 
   4083 	/* Try to read alternative MAC address pointer */
   4084 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4085 		return 0;
   4086 
    4087 	/* Check whether the pointer is valid. */
   4088 	if ((offset == 0x0000) || (offset == 0xffff))
   4089 		return 0;
   4090 
   4091 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4092 	/*
   4093 	 * Check whether alternative MAC address is valid or not.
    4094 	 * Some cards have a non-0xffff pointer but do not actually use
    4095 	 * an alternative MAC address.
    4096 	 *
    4097 	 * A valid unicast address must have the multicast bit clear.
   4098 	 */
   4099 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4100 		if (((myea[0] & 0xff) & 0x01) == 0)
   4101 			return offset; /* Found */
   4102 
   4103 	/* Not found */
   4104 	return 0;
   4105 }
   4106 
   4107 static int
   4108 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4109 {
   4110 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4111 	uint16_t offset = NVM_OFF_MACADDR;
   4112 	int do_invert = 0;
   4113 
   4114 	switch (sc->sc_type) {
   4115 	case WM_T_82580:
   4116 	case WM_T_I350:
   4117 	case WM_T_I354:
   4118 		/* EEPROM Top Level Partitioning */
   4119 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4120 		break;
   4121 	case WM_T_82571:
   4122 	case WM_T_82575:
   4123 	case WM_T_82576:
   4124 	case WM_T_80003:
   4125 	case WM_T_I210:
   4126 	case WM_T_I211:
   4127 		offset = wm_check_alt_mac_addr(sc);
   4128 		if (offset == 0)
   4129 			if ((sc->sc_funcid & 0x01) == 1)
   4130 				do_invert = 1;
   4131 		break;
   4132 	default:
   4133 		if ((sc->sc_funcid & 0x01) == 1)
   4134 			do_invert = 1;
   4135 		break;
   4136 	}
   4137 
   4138 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4139 		goto bad;
   4140 
   4141 	enaddr[0] = myea[0] & 0xff;
   4142 	enaddr[1] = myea[0] >> 8;
   4143 	enaddr[2] = myea[1] & 0xff;
   4144 	enaddr[3] = myea[1] >> 8;
   4145 	enaddr[4] = myea[2] & 0xff;
   4146 	enaddr[5] = myea[2] >> 8;
   4147 
   4148 	/*
   4149 	 * Toggle the LSB of the MAC address on the second port
   4150 	 * of some dual port cards.
   4151 	 */
   4152 	if (do_invert != 0)
   4153 		enaddr[5] ^= 1;
   4154 
   4155 	return 0;
   4156 
   4157  bad:
   4158 	return -1;
   4159 }
   4160 
   4161 /*
   4162  * wm_set_ral:
   4163  *
    4164  *	Set an entry in the receive address list.
   4165  */
   4166 static void
   4167 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4168 {
   4169 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4170 	uint32_t wlock_mac;
   4171 	int rv;
   4172 
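	/*
	 * Pack the address into the RAL low/high words; RAL_AV marks the
	 * entry valid. A NULL enaddr clears the slot.
	 */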
   4173 	if (enaddr != NULL) {
   4174 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4175 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4176 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4177 		ral_hi |= RAL_AV;
   4178 	} else {
   4179 		ral_lo = 0;
   4180 		ral_hi = 0;
   4181 	}
   4182 
   4183 	switch (sc->sc_type) {
   4184 	case WM_T_82542_2_0:
   4185 	case WM_T_82542_2_1:
   4186 	case WM_T_82543:
   4187 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4188 		CSR_WRITE_FLUSH(sc);
   4189 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4190 		CSR_WRITE_FLUSH(sc);
   4191 		break;
   4192 	case WM_T_PCH2:
   4193 	case WM_T_PCH_LPT:
   4194 	case WM_T_PCH_SPT:
   4195 	case WM_T_PCH_CNP:
   4196 		if (idx == 0) {
   4197 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4198 			CSR_WRITE_FLUSH(sc);
   4199 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4200 			CSR_WRITE_FLUSH(sc);
   4201 			return;
   4202 		}
   4203 		if (sc->sc_type != WM_T_PCH2) {
   4204 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4205 			    FWSM_WLOCK_MAC);
   4206 			addrl = WMREG_SHRAL(idx - 1);
   4207 			addrh = WMREG_SHRAH(idx - 1);
   4208 		} else {
   4209 			wlock_mac = 0;
   4210 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4211 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4212 		}
   4213 
   4214 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4215 			rv = wm_get_swflag_ich8lan(sc);
   4216 			if (rv != 0)
   4217 				return;
   4218 			CSR_WRITE(sc, addrl, ral_lo);
   4219 			CSR_WRITE_FLUSH(sc);
   4220 			CSR_WRITE(sc, addrh, ral_hi);
   4221 			CSR_WRITE_FLUSH(sc);
   4222 			wm_put_swflag_ich8lan(sc);
   4223 		}
   4224 
   4225 		break;
   4226 	default:
   4227 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4228 		CSR_WRITE_FLUSH(sc);
   4229 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4230 		CSR_WRITE_FLUSH(sc);
   4231 		break;
   4232 	}
   4233 }
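
/*
 * A minimal sketch (not part of the driver; the function name is ours) of
 * the RAL/RAH packing used by wm_set_ral() above: the first four octets go
 * into the low register LSB first, the last two into the high register
 * together with the Address Valid flag.  For 00:11:22:33:44:55 this gives
 * lo == 0x33221100 and hi == (RAL_AV | 0x5544).
 */
#if 0
static void
example_pack_ral(const uint8_t ea[6], uint32_t *lo, uint32_t *hi)
{

	*lo = (uint32_t)ea[0] | ((uint32_t)ea[1] << 8) |
	    ((uint32_t)ea[2] << 16) | ((uint32_t)ea[3] << 24);
	*hi = (uint32_t)ea[4] | ((uint32_t)ea[5] << 8) | RAL_AV;
}
#endif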
   4234 
   4235 /*
   4236  * wm_mchash:
   4237  *
   4238  *	Compute the hash of the multicast address for the 4096-bit
   4239  *	multicast filter.
   4240  */
   4241 static uint32_t
   4242 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4243 {
   4244 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4245 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4246 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4247 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4248 	uint32_t hash;
   4249 
   4250 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4251 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4252 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4253 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4254 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4255 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4256 		return (hash & 0x3ff);
   4257 	}
   4258 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4259 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4260 
   4261 	return (hash & 0xfff);
   4262 }
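
/*
 * Worked example (editorial illustration): with the default filter type
 * (sc_mchash_type == 0) and the mDNS group address 01:00:5e:00:00:fb,
 * enaddr[4] == 0x00 and enaddr[5] == 0xfb, so
 *
 *	hash = (0x00 >> 4) | (0xfb << 4) = 0xfb0	(12-bit variant)
 *
 * On the ICH/PCH variants the same address hashes to
 * (0x00 >> 6) | (0xfb << 2) == 0x3ec under the 10-bit mask.
 */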
   4263 
   4264 /*
   4265  * wm_rar_count:
   4266  *	Return the number of usable receive address registers.
   4267  */
   4268 static int
   4269 wm_rar_count(struct wm_softc *sc)
   4270 {
   4271 	int size;
   4272 
   4273 	switch (sc->sc_type) {
   4274 	case WM_T_ICH8:
   4275 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4276 		break;
   4277 	case WM_T_ICH9:
   4278 	case WM_T_ICH10:
   4279 	case WM_T_PCH:
   4280 		size = WM_RAL_TABSIZE_ICH8;
   4281 		break;
   4282 	case WM_T_PCH2:
   4283 		size = WM_RAL_TABSIZE_PCH2;
   4284 		break;
   4285 	case WM_T_PCH_LPT:
   4286 	case WM_T_PCH_SPT:
   4287 	case WM_T_PCH_CNP:
   4288 		size = WM_RAL_TABSIZE_PCH_LPT;
   4289 		break;
   4290 	case WM_T_82575:
   4291 	case WM_T_I210:
   4292 	case WM_T_I211:
   4293 		size = WM_RAL_TABSIZE_82575;
   4294 		break;
   4295 	case WM_T_82576:
   4296 	case WM_T_82580:
   4297 		size = WM_RAL_TABSIZE_82576;
   4298 		break;
   4299 	case WM_T_I350:
   4300 	case WM_T_I354:
   4301 		size = WM_RAL_TABSIZE_I350;
   4302 		break;
   4303 	default:
   4304 		size = WM_RAL_TABSIZE;
   4305 	}
   4306 
   4307 	return size;
   4308 }
   4309 
   4310 /*
   4311  * wm_set_filter:
   4312  *
   4313  *	Set up the receive filter.
   4314  */
   4315 static void
   4316 wm_set_filter(struct wm_softc *sc)
   4317 {
   4318 	struct ethercom *ec = &sc->sc_ethercom;
   4319 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4320 	struct ether_multi *enm;
   4321 	struct ether_multistep step;
   4322 	bus_addr_t mta_reg;
   4323 	uint32_t hash, reg, bit;
   4324 	int i, size, ralmax, rv;
   4325 
   4326 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4327 		device_xname(sc->sc_dev), __func__));
   4328 
   4329 	if (sc->sc_type >= WM_T_82544)
   4330 		mta_reg = WMREG_CORDOVA_MTA;
   4331 	else
   4332 		mta_reg = WMREG_MTA;
   4333 
   4334 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4335 
   4336 	if (ifp->if_flags & IFF_BROADCAST)
   4337 		sc->sc_rctl |= RCTL_BAM;
   4338 	if (ifp->if_flags & IFF_PROMISC) {
   4339 		sc->sc_rctl |= RCTL_UPE;
   4340 		ETHER_LOCK(ec);
   4341 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4342 		ETHER_UNLOCK(ec);
   4343 		goto allmulti;
   4344 	}
   4345 
   4346 	/*
   4347 	 * Set the station address in the first RAL slot, and
   4348 	 * clear the remaining slots.
   4349 	 */
   4350 	size = wm_rar_count(sc);
   4351 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4352 
   4353 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4354 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4355 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4356 		switch (i) {
   4357 		case 0:
   4358 			/* We can use all entries */
   4359 			ralmax = size;
   4360 			break;
   4361 		case 1:
   4362 			/* Only RAR[0] */
   4363 			ralmax = 1;
   4364 			break;
   4365 		default:
   4366 			/* Available SHRA + RAR[0] */
   4367 			ralmax = i + 1;
   4368 		}
   4369 	} else
   4370 		ralmax = size;
   4371 	for (i = 1; i < size; i++) {
   4372 		if (i < ralmax)
   4373 			wm_set_ral(sc, NULL, i);
   4374 	}
   4375 
   4376 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4377 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4378 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4379 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4380 		size = WM_ICH8_MC_TABSIZE;
   4381 	else
   4382 		size = WM_MC_TABSIZE;
   4383 	/* Clear out the multicast table. */
   4384 	for (i = 0; i < size; i++) {
   4385 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4386 		CSR_WRITE_FLUSH(sc);
   4387 	}
   4388 
   4389 	ETHER_LOCK(ec);
   4390 	ETHER_FIRST_MULTI(step, ec, enm);
   4391 	while (enm != NULL) {
   4392 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4393 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4394 			ETHER_UNLOCK(ec);
   4395 			/*
   4396 			 * We must listen to a range of multicast addresses.
   4397 			 * For now, just accept all multicasts, rather than
   4398 			 * trying to set only those filter bits needed to match
   4399 			 * the range.  (At this time, the only use of address
   4400 			 * ranges is for IP multicast routing, for which the
   4401 			 * range is big enough to require all bits set.)
   4402 			 */
   4403 			goto allmulti;
   4404 		}
   4405 
   4406 		hash = wm_mchash(sc, enm->enm_addrlo);
   4407 
   4408 		reg = (hash >> 5);
   4409 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4410 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4411 		    || (sc->sc_type == WM_T_PCH2)
   4412 		    || (sc->sc_type == WM_T_PCH_LPT)
   4413 		    || (sc->sc_type == WM_T_PCH_SPT)
   4414 		    || (sc->sc_type == WM_T_PCH_CNP))
   4415 			reg &= 0x1f;
   4416 		else
   4417 			reg &= 0x7f;
   4418 		bit = hash & 0x1f;
   4419 
   4420 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4421 		hash |= 1U << bit;
   4422 
   4423 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4424 			/*
   4425 			 * 82544 Errata 9: Certain registers cannot be written
   4426 			 * with particular alignments in PCI-X bus operation
   4427 			 * (FCAH, MTA and VFTA).
   4428 			 */
   4429 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4430 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4431 			CSR_WRITE_FLUSH(sc);
   4432 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4433 			CSR_WRITE_FLUSH(sc);
   4434 		} else {
   4435 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4436 			CSR_WRITE_FLUSH(sc);
   4437 		}
   4438 
   4439 		ETHER_NEXT_MULTI(step, enm);
   4440 	}
   4441 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4442 	ETHER_UNLOCK(ec);
   4443 
   4444 	goto setit;
   4445 
   4446  allmulti:
   4447 	sc->sc_rctl |= RCTL_MPE;
   4448 
   4449  setit:
   4450 	if (sc->sc_type >= WM_T_PCH2) {
   4451 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4452 		    && (ifp->if_mtu > ETHERMTU))
   4453 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4454 		else
   4455 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4456 		if (rv != 0)
   4457 			device_printf(sc->sc_dev,
   4458 			    "Failed to apply the jumbo frame workaround.\n");
   4459 	}
   4460 
   4461 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4462 }
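
/*
 * A minimal sketch (not part of the driver; the function name is ours) of
 * how wm_set_filter() above turns a hash into a multicast table position:
 * the upper hash bits select one 32-bit MTA register and the low five bits
 * select a bit within it.  Continuing the wm_mchash() example, hash 0xfb0
 * lands in MTA[125], bit 16.
 */
#if 0
static void
example_mta_locate(uint32_t hash, uint32_t *reg, uint32_t *bit)
{

	*reg = (hash >> 5) & 0x7f;	/* & 0x1f on ICH/PCH variants */
	*bit = hash & 0x1f;
}
#endif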
   4463 
   4464 /* Reset and init related */
   4465 
   4466 static void
   4467 wm_set_vlan(struct wm_softc *sc)
   4468 {
   4469 
   4470 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4471 		device_xname(sc->sc_dev), __func__));
   4472 
   4473 	/* Deal with VLAN enables. */
   4474 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4475 		sc->sc_ctrl |= CTRL_VME;
   4476 	else
   4477 		sc->sc_ctrl &= ~CTRL_VME;
   4478 
   4479 	/* Write the control registers. */
   4480 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4481 }
   4482 
   4483 static void
   4484 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4485 {
   4486 	uint32_t gcr;
   4487 	pcireg_t ctrl2;
   4488 
   4489 	gcr = CSR_READ(sc, WMREG_GCR);
   4490 
   4491 	/* Only take action if timeout value is defaulted to 0 */
   4492 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4493 		goto out;
   4494 
   4495 	if ((gcr & GCR_CAP_VER2) == 0) {
   4496 		gcr |= GCR_CMPL_TMOUT_10MS;
   4497 		goto out;
   4498 	}
   4499 
   4500 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4501 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4502 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4503 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4504 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4505 
   4506 out:
   4507 	/* Disable completion timeout resend */
   4508 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4509 
   4510 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4511 }
   4512 
   4513 void
   4514 wm_get_auto_rd_done(struct wm_softc *sc)
   4515 {
   4516 	int i;
   4517 
   4518 	/* wait for eeprom to reload */
   4519 	switch (sc->sc_type) {
   4520 	case WM_T_82571:
   4521 	case WM_T_82572:
   4522 	case WM_T_82573:
   4523 	case WM_T_82574:
   4524 	case WM_T_82583:
   4525 	case WM_T_82575:
   4526 	case WM_T_82576:
   4527 	case WM_T_82580:
   4528 	case WM_T_I350:
   4529 	case WM_T_I354:
   4530 	case WM_T_I210:
   4531 	case WM_T_I211:
   4532 	case WM_T_80003:
   4533 	case WM_T_ICH8:
   4534 	case WM_T_ICH9:
   4535 		for (i = 0; i < 10; i++) {
   4536 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4537 				break;
   4538 			delay(1000);
   4539 		}
   4540 		if (i == 10) {
   4541 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4542 			    "complete\n", device_xname(sc->sc_dev));
   4543 		}
   4544 		break;
   4545 	default:
   4546 		break;
   4547 	}
   4548 }
   4549 
   4550 void
   4551 wm_lan_init_done(struct wm_softc *sc)
   4552 {
   4553 	uint32_t reg = 0;
   4554 	int i;
   4555 
   4556 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4557 		device_xname(sc->sc_dev), __func__));
   4558 
   4559 	/* Wait for eeprom to reload */
   4560 	switch (sc->sc_type) {
   4561 	case WM_T_ICH10:
   4562 	case WM_T_PCH:
   4563 	case WM_T_PCH2:
   4564 	case WM_T_PCH_LPT:
   4565 	case WM_T_PCH_SPT:
   4566 	case WM_T_PCH_CNP:
   4567 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4568 			reg = CSR_READ(sc, WMREG_STATUS);
   4569 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4570 				break;
   4571 			delay(100);
   4572 		}
   4573 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4574 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4575 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4576 		}
   4577 		break;
   4578 	default:
   4579 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4580 		    __func__);
   4581 		break;
   4582 	}
   4583 
   4584 	reg &= ~STATUS_LAN_INIT_DONE;
   4585 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4586 }
   4587 
   4588 void
   4589 wm_get_cfg_done(struct wm_softc *sc)
   4590 {
   4591 	int mask;
   4592 	uint32_t reg;
   4593 	int i;
   4594 
   4595 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4596 		device_xname(sc->sc_dev), __func__));
   4597 
   4598 	/* Wait for eeprom to reload */
   4599 	switch (sc->sc_type) {
   4600 	case WM_T_82542_2_0:
   4601 	case WM_T_82542_2_1:
   4602 		/* null */
   4603 		break;
   4604 	case WM_T_82543:
   4605 	case WM_T_82544:
   4606 	case WM_T_82540:
   4607 	case WM_T_82545:
   4608 	case WM_T_82545_3:
   4609 	case WM_T_82546:
   4610 	case WM_T_82546_3:
   4611 	case WM_T_82541:
   4612 	case WM_T_82541_2:
   4613 	case WM_T_82547:
   4614 	case WM_T_82547_2:
   4615 	case WM_T_82573:
   4616 	case WM_T_82574:
   4617 	case WM_T_82583:
   4618 		/* generic */
   4619 		delay(10*1000);
   4620 		break;
   4621 	case WM_T_80003:
   4622 	case WM_T_82571:
   4623 	case WM_T_82572:
   4624 	case WM_T_82575:
   4625 	case WM_T_82576:
   4626 	case WM_T_82580:
   4627 	case WM_T_I350:
   4628 	case WM_T_I354:
   4629 	case WM_T_I210:
   4630 	case WM_T_I211:
   4631 		if (sc->sc_type == WM_T_82571) {
   4632 			/* On 82571, all functions use the port 0 bit */
   4633 			mask = EEMNGCTL_CFGDONE_0;
   4634 		} else
   4635 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4636 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4637 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4638 				break;
   4639 			delay(1000);
   4640 		}
   4641 		if (i >= WM_PHY_CFG_TIMEOUT)
   4642 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4643 				device_xname(sc->sc_dev), __func__));
   4644 		break;
   4645 	case WM_T_ICH8:
   4646 	case WM_T_ICH9:
   4647 	case WM_T_ICH10:
   4648 	case WM_T_PCH:
   4649 	case WM_T_PCH2:
   4650 	case WM_T_PCH_LPT:
   4651 	case WM_T_PCH_SPT:
   4652 	case WM_T_PCH_CNP:
   4653 		delay(10*1000);
   4654 		if (sc->sc_type >= WM_T_ICH10)
   4655 			wm_lan_init_done(sc);
   4656 		else
   4657 			wm_get_auto_rd_done(sc);
   4658 
   4659 		/* Clear PHY Reset Asserted bit */
   4660 		reg = CSR_READ(sc, WMREG_STATUS);
   4661 		if ((reg & STATUS_PHYRA) != 0)
   4662 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4663 		break;
   4664 	default:
   4665 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4666 		    __func__);
   4667 		break;
   4668 	}
   4669 }
   4670 
   4671 int
   4672 wm_phy_post_reset(struct wm_softc *sc)
   4673 {
   4674 	device_t dev = sc->sc_dev;
   4675 	uint16_t reg;
   4676 	int rv = 0;
   4677 
   4678 	/* This function is only for ICH8 and newer. */
   4679 	if (sc->sc_type < WM_T_ICH8)
   4680 		return 0;
   4681 
   4682 	if (wm_phy_resetisblocked(sc)) {
   4683 		/* XXX */
   4684 		device_printf(dev, "PHY is blocked\n");
   4685 		return -1;
   4686 	}
   4687 
   4688 	/* Allow time for h/w to get to quiescent state after reset */
   4689 	delay(10*1000);
   4690 
   4691 	/* Perform any necessary post-reset workarounds */
   4692 	if (sc->sc_type == WM_T_PCH)
   4693 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4694 	else if (sc->sc_type == WM_T_PCH2)
   4695 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4696 	if (rv != 0)
   4697 		return rv;
   4698 
   4699 	/* Clear the host wakeup bit after lcd reset */
   4700 	if (sc->sc_type >= WM_T_PCH) {
   4701 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4702 		reg &= ~BM_WUC_HOST_WU_BIT;
   4703 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4704 	}
   4705 
   4706 	/* Configure the LCD with the extended configuration region in NVM */
   4707 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4708 		return rv;
   4709 
   4710 	/* Configure the LCD with the OEM bits in NVM */
   4711 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4712 
   4713 	if (sc->sc_type == WM_T_PCH2) {
   4714 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4715 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4716 			delay(10 * 1000);
   4717 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4718 		}
   4719 		/* Set EEE LPI Update Timer to 200usec */
   4720 		rv = sc->phy.acquire(sc);
   4721 		if (rv)
   4722 			return rv;
   4723 		rv = wm_write_emi_reg_locked(dev,
   4724 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4725 		sc->phy.release(sc);
   4726 	}
   4727 
   4728 	return rv;
   4729 }
   4730 
   4731 /* Only for PCH and newer */
   4732 static int
   4733 wm_write_smbus_addr(struct wm_softc *sc)
   4734 {
   4735 	uint32_t strap, freq;
   4736 	uint16_t phy_data;
   4737 	int rv;
   4738 
   4739 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4740 		device_xname(sc->sc_dev), __func__));
   4741 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4742 
   4743 	strap = CSR_READ(sc, WMREG_STRAP);
   4744 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4745 
   4746 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4747 	if (rv != 0)
   4748 		return rv;
   4749 
   4750 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4751 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4752 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4753 
   4754 	if (sc->sc_phytype == WMPHY_I217) {
   4755 		/* Restore SMBus frequency */
   4756 		if (freq--) {
   4757 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4758 			    | HV_SMB_ADDR_FREQ_HIGH);
   4759 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4760 			    HV_SMB_ADDR_FREQ_LOW);
   4761 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4762 			    HV_SMB_ADDR_FREQ_HIGH);
   4763 		} else
   4764 			DPRINTF(sc, WM_DEBUG_INIT,
   4765 			    ("%s: %s: Unsupported SMB frequency in PHY\n",
   4766 				device_xname(sc->sc_dev), __func__));
   4767 	}
   4768 
   4769 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4770 	    phy_data);
   4771 }
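
/*
 * Note on the field macros used above (editorial illustration with an
 * arbitrary mask): __SHIFTOUT(x, mask) extracts a field by masking it and
 * shifting it down to bit 0, and __SHIFTIN(v, mask) places a value into
 * that field, e.g.
 *
 *	__SHIFTOUT(0x1234, 0x00f0) == 0x3
 *	__SHIFTIN(0x5, 0x00f0)     == 0x50
 *
 * This is how STRAP_FREQ and STRAP_SMBUSADDR are decoded above without
 * open-coding the shift counts.
 */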
   4772 
   4773 static int
   4774 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4775 {
   4776 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4777 	uint16_t phy_page = 0;
   4778 	int rv = 0;
   4779 
   4780 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4781 		device_xname(sc->sc_dev), __func__));
   4782 
   4783 	switch (sc->sc_type) {
   4784 	case WM_T_ICH8:
   4785 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4786 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4787 			return 0;
   4788 
   4789 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4790 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4791 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4792 			break;
   4793 		}
   4794 		/* FALLTHROUGH */
   4795 	case WM_T_PCH:
   4796 	case WM_T_PCH2:
   4797 	case WM_T_PCH_LPT:
   4798 	case WM_T_PCH_SPT:
   4799 	case WM_T_PCH_CNP:
   4800 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4801 		break;
   4802 	default:
   4803 		return 0;
   4804 	}
   4805 
   4806 	if ((rv = sc->phy.acquire(sc)) != 0)
   4807 		return rv;
   4808 
   4809 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4810 	if ((reg & sw_cfg_mask) == 0)
   4811 		goto release;
   4812 
   4813 	/*
   4814 	 * Make sure HW does not configure LCD from PHY extended configuration
   4815 	 * before SW configuration
   4816 	 */
   4817 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4818 	if ((sc->sc_type < WM_T_PCH2)
   4819 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4820 		goto release;
   4821 
   4822 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4823 		device_xname(sc->sc_dev), __func__));
   4824 	/* word_addr is in DWORD */
   4825 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4826 
   4827 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4828 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4829 	if (cnf_size == 0)
   4830 		goto release;
   4831 
   4832 	if (((sc->sc_type == WM_T_PCH)
   4833 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4834 	    || (sc->sc_type > WM_T_PCH)) {
   4835 		/*
   4836 		 * HW configures the SMBus address and LEDs when the OEM and
   4837 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4838 		 * are cleared, SW will configure them instead.
   4839 		 */
   4840 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4841 			device_xname(sc->sc_dev), __func__));
   4842 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4843 			goto release;
   4844 
   4845 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4846 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4847 		    (uint16_t)reg);
   4848 		if (rv != 0)
   4849 			goto release;
   4850 	}
   4851 
   4852 	/* Configure LCD from extended configuration region. */
   4853 	for (i = 0; i < cnf_size; i++) {
   4854 		uint16_t reg_data, reg_addr;
   4855 
   4856 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4857 			goto release;
   4858 
   4859 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4860 			goto release;
   4861 
   4862 		if (reg_addr == IGPHY_PAGE_SELECT)
   4863 			phy_page = reg_data;
   4864 
   4865 		reg_addr &= IGPHY_MAXREGADDR;
   4866 		reg_addr |= phy_page;
   4867 
   4868 		KASSERT(sc->phy.writereg_locked != NULL);
   4869 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4870 		    reg_data);
   4871 	}
   4872 
   4873 release:
   4874 	sc->phy.release(sc);
   4875 	return rv;
   4876 }
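
/*
 * Layout of the extended configuration region walked by the loop in
 * wm_init_lcd_from_nvm() above (editorial sketch; each entry is a pair of
 * NVM words):
 *
 *	word_addr + 2i		PHY register data for entry i
 *	word_addr + 2i + 1	PHY register address for entry i
 *
 * An entry whose address word is IGPHY_PAGE_SELECT additionally records
 * the new page in phy_page, which is OR'd into the register address of
 * the entries that follow.
 */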
   4877 
   4878 /*
   4879  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4880  *  @sc:       pointer to the HW structure
   4881  *  @d0_state: true if entering the D0 power state, false for D3
   4882  *
   4883  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4884  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4885  *  in the NVM determine whether HW should configure LPLU and Gbe Disable.
   4886  */
   4887 int
   4888 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4889 {
   4890 	uint32_t mac_reg;
   4891 	uint16_t oem_reg;
   4892 	int rv;
   4893 
   4894 	if (sc->sc_type < WM_T_PCH)
   4895 		return 0;
   4896 
   4897 	rv = sc->phy.acquire(sc);
   4898 	if (rv != 0)
   4899 		return rv;
   4900 
   4901 	if (sc->sc_type == WM_T_PCH) {
   4902 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4903 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4904 			goto release;
   4905 	}
   4906 
   4907 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4908 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4909 		goto release;
   4910 
   4911 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4912 
   4913 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4914 	if (rv != 0)
   4915 		goto release;
   4916 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4917 
   4918 	if (d0_state) {
   4919 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4920 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4921 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4922 			oem_reg |= HV_OEM_BITS_LPLU;
   4923 	} else {
   4924 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4925 		    != 0)
   4926 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4927 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4928 		    != 0)
   4929 			oem_reg |= HV_OEM_BITS_LPLU;
   4930 	}
   4931 
   4932 	/* Set Restart auto-neg to activate the bits */
   4933 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4934 	    && (wm_phy_resetisblocked(sc) == false))
   4935 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4936 
   4937 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4938 
   4939 release:
   4940 	sc->phy.release(sc);
   4941 
   4942 	return rv;
   4943 }
   4944 
   4945 /* Init hardware bits */
   4946 void
   4947 wm_initialize_hardware_bits(struct wm_softc *sc)
   4948 {
   4949 	uint32_t tarc0, tarc1, reg;
   4950 
   4951 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4952 		device_xname(sc->sc_dev), __func__));
   4953 
   4954 	/* For 82571 variant, 80003 and ICHs */
   4955 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4956 	    || (sc->sc_type >= WM_T_80003)) {
   4957 
   4958 		/* Transmit Descriptor Control 0 */
   4959 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4960 		reg |= TXDCTL_COUNT_DESC;
   4961 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4962 
   4963 		/* Transmit Descriptor Control 1 */
   4964 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4965 		reg |= TXDCTL_COUNT_DESC;
   4966 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4967 
   4968 		/* TARC0 */
   4969 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4970 		switch (sc->sc_type) {
   4971 		case WM_T_82571:
   4972 		case WM_T_82572:
   4973 		case WM_T_82573:
   4974 		case WM_T_82574:
   4975 		case WM_T_82583:
   4976 		case WM_T_80003:
   4977 			/* Clear bits 30..27 */
   4978 			tarc0 &= ~__BITS(30, 27);
   4979 			break;
   4980 		default:
   4981 			break;
   4982 		}
   4983 
   4984 		switch (sc->sc_type) {
   4985 		case WM_T_82571:
   4986 		case WM_T_82572:
   4987 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4988 
   4989 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4990 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4991 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4992 			/* 8257[12] Errata No.7 */
   4993 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4994 
   4995 			/* TARC1 bit 28 */
   4996 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4997 				tarc1 &= ~__BIT(28);
   4998 			else
   4999 				tarc1 |= __BIT(28);
   5000 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5001 
   5002 			/*
   5003 			 * 8257[12] Errata No.13
   5004 			 * Disable Dynamic Clock Gating.
   5005 			 */
   5006 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5007 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5008 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5009 			break;
   5010 		case WM_T_82573:
   5011 		case WM_T_82574:
   5012 		case WM_T_82583:
   5013 			if ((sc->sc_type == WM_T_82574)
   5014 			    || (sc->sc_type == WM_T_82583))
   5015 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5016 
   5017 			/* Extended Device Control */
   5018 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5019 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5020 			reg |= __BIT(22);	/* Set bit 22 */
   5021 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5022 
   5023 			/* Device Control */
   5024 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5025 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5026 
   5027 			/* PCIe Control Register */
   5028 			/*
   5029 			 * 82573 Errata (unknown).
   5030 			 *
   5031 			 * 82574 Errata 25 and 82583 Errata 12
   5032 			 * "Dropped Rx Packets":
   5033 			 *   NVM image version 2.1.4 and newer do not have this bug.
   5034 			 */
   5035 			reg = CSR_READ(sc, WMREG_GCR);
   5036 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5037 			CSR_WRITE(sc, WMREG_GCR, reg);
   5038 
   5039 			if ((sc->sc_type == WM_T_82574)
   5040 			    || (sc->sc_type == WM_T_82583)) {
   5041 				/*
   5042 				 * Document says this bit must be set for
   5043 				 * proper operation.
   5044 				 */
   5045 				reg = CSR_READ(sc, WMREG_GCR);
   5046 				reg |= __BIT(22);
   5047 				CSR_WRITE(sc, WMREG_GCR, reg);
   5048 
   5049 				/*
   5050 				 * Apply a workaround for a hardware erratum
   5051 				 * documented in the errata sheets. It fixes
   5052 				 * an issue where error-prone or unreliable
   5053 				 * PCIe completions occur, particularly with
   5054 				 * ASPM enabled. Without the fix, the erratum
   5055 				 * can cause Tx timeouts.
   5056 				 */
   5057 				reg = CSR_READ(sc, WMREG_GCR2);
   5058 				reg |= __BIT(0);
   5059 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5060 			}
   5061 			break;
   5062 		case WM_T_80003:
   5063 			/* TARC0 */
   5064 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5065 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   5066 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5067 
   5068 			/* TARC1 bit 28 */
   5069 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5070 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5071 				tarc1 &= ~__BIT(28);
   5072 			else
   5073 				tarc1 |= __BIT(28);
   5074 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5075 			break;
   5076 		case WM_T_ICH8:
   5077 		case WM_T_ICH9:
   5078 		case WM_T_ICH10:
   5079 		case WM_T_PCH:
   5080 		case WM_T_PCH2:
   5081 		case WM_T_PCH_LPT:
   5082 		case WM_T_PCH_SPT:
   5083 		case WM_T_PCH_CNP:
   5084 			/* TARC0 */
   5085 			if (sc->sc_type == WM_T_ICH8) {
   5086 				/* Set TARC0 bits 29 and 28 */
   5087 				tarc0 |= __BITS(29, 28);
   5088 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5089 				tarc0 |= __BIT(29);
   5090 				/*
   5091 				 * Drop bit 28. From Linux.
   5092 				 * See I218/I219 spec update
   5093 				 * "5. Buffer Overrun While the I219 is
   5094 				 * Processing DMA Transactions"
   5095 				 */
   5096 				tarc0 &= ~__BIT(28);
   5097 			}
   5098 			/* Set TARC0 bits 23,24,26,27 */
   5099 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5100 
   5101 			/* CTRL_EXT */
   5102 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5103 			reg |= __BIT(22);	/* Set bit 22 */
   5104 			/*
   5105 			 * Enable PHY low-power state when MAC is at D3
   5106 			 * w/o WoL
   5107 			 */
   5108 			if (sc->sc_type >= WM_T_PCH)
   5109 				reg |= CTRL_EXT_PHYPDEN;
   5110 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5111 
   5112 			/* TARC1 */
   5113 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5114 			/* bit 28 */
   5115 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5116 				tarc1 &= ~__BIT(28);
   5117 			else
   5118 				tarc1 |= __BIT(28);
   5119 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5120 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5121 
   5122 			/* Device Status */
   5123 			if (sc->sc_type == WM_T_ICH8) {
   5124 				reg = CSR_READ(sc, WMREG_STATUS);
   5125 				reg &= ~__BIT(31);
   5126 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5127 
   5128 			}
   5129 
   5130 			/* IOSFPC */
   5131 			if (sc->sc_type == WM_T_PCH_SPT) {
   5132 				reg = CSR_READ(sc, WMREG_IOSFPC);
   5133 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   5134 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5135 			}
   5136 			/*
   5137 			 * To work around a descriptor data corruption issue
   5138 			 * seen with NFS v2 UDP traffic, just disable the NFS
   5139 			 * filtering capability.
   5140 			 */
   5141 			reg = CSR_READ(sc, WMREG_RFCTL);
   5142 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5143 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5144 			break;
   5145 		default:
   5146 			break;
   5147 		}
   5148 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5149 
   5150 		switch (sc->sc_type) {
   5151 		/*
   5152 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   5153 		 * Avoid RSS Hash Value bug.
   5154 		 */
   5155 		case WM_T_82571:
   5156 		case WM_T_82572:
   5157 		case WM_T_82573:
   5158 		case WM_T_80003:
   5159 		case WM_T_ICH8:
   5160 			reg = CSR_READ(sc, WMREG_RFCTL);
   5161 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   5162 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5163 			break;
   5164 		case WM_T_82574:
   5165 			/* Use extended Rx descriptors. */
   5166 			reg = CSR_READ(sc, WMREG_RFCTL);
   5167 			reg |= WMREG_RFCTL_EXSTEN;
   5168 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5169 			break;
   5170 		default:
   5171 			break;
   5172 		}
   5173 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5174 		/*
   5175 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5176 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5177 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5178 		 * Correctly by the Device"
   5179 		 *
   5180 		 * I354(C2000) Errata AVR53:
   5181 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5182 		 * Hang"
   5183 		 */
   5184 		reg = CSR_READ(sc, WMREG_RFCTL);
   5185 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5186 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5187 	}
   5188 }
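
/*
 * Note on the bit macros used heavily above (editorial illustration):
 * __BIT(n) is a mask with only bit n set and __BITS(hi, lo) is a mask
 * covering bits lo through hi inclusive, e.g.
 *
 *	__BIT(22)      == 0x00400000
 *	__BITS(30, 27) == 0x78000000
 *
 * so "tarc0 &= ~__BITS(30, 27)" clears TARC0 bits 27 through 30.
 */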
   5189 
   5190 static uint32_t
   5191 wm_rxpbs_adjust_82580(uint32_t val)
   5192 {
   5193 	uint32_t rv = 0;
   5194 
   5195 	if (val < __arraycount(wm_82580_rxpbs_table))
   5196 		rv = wm_82580_rxpbs_table[val];
   5197 
   5198 	return rv;
   5199 }
   5200 
   5201 /*
   5202  * wm_reset_phy:
   5203  *
   5204  *	generic PHY reset function.
   5205  *	Same as e1000_phy_hw_reset_generic()
   5206  */
   5207 static int
   5208 wm_reset_phy(struct wm_softc *sc)
   5209 {
   5210 	uint32_t reg;
   5211 
   5212 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5213 		device_xname(sc->sc_dev), __func__));
   5214 	if (wm_phy_resetisblocked(sc))
   5215 		return -1;
   5216 
   5217 	sc->phy.acquire(sc);
   5218 
   5219 	reg = CSR_READ(sc, WMREG_CTRL);
   5220 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5221 	CSR_WRITE_FLUSH(sc);
   5222 
   5223 	delay(sc->phy.reset_delay_us);
   5224 
   5225 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5226 	CSR_WRITE_FLUSH(sc);
   5227 
   5228 	delay(150);
   5229 
   5230 	sc->phy.release(sc);
   5231 
   5232 	wm_get_cfg_done(sc);
   5233 	wm_phy_post_reset(sc);
   5234 
   5235 	return 0;
   5236 }
   5237 
   5238 /*
   5239  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5240  *
   5241  * On the I219, the descriptor rings must be emptied before resetting the HW
   5242  * or before changing the device state to D3 during runtime (runtime PM).
   5243  *
   5244  * Failure to do this will cause the HW to enter a unit hang state which can
   5245  * only be released by PCI reset on the device.
   5246  *
   5247  * The I219 does not use multiqueue, so it is enough to check sc->sc_queue[0].
   5248  */
   5249 static void
   5250 wm_flush_desc_rings(struct wm_softc *sc)
   5251 {
   5252 	pcireg_t preg;
   5253 	uint32_t reg;
   5254 	struct wm_txqueue *txq;
   5255 	wiseman_txdesc_t *txd;
   5256 	int nexttx;
   5257 	uint32_t rctl;
   5258 
   5259 	/* First, disable MULR fix in FEXTNVM11 */
   5260 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5261 	reg |= FEXTNVM11_DIS_MULRFIX;
   5262 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5263 
   5264 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5265 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5266 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5267 		return;
   5268 
   5269 	/*
   5270 	 * Remove all descriptors from the tx_ring.
   5271 	 *
   5272 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5273 	 * happens when the HW reads the regs. We assign the ring itself as
   5274 	 * the data of the next descriptor. We don't care about the data since
   5275 	 * we are about to reset the HW.
   5276 	 */
   5277 #ifdef WM_DEBUG
   5278 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5279 #endif
   5280 	reg = CSR_READ(sc, WMREG_TCTL);
   5281 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5282 
   5283 	txq = &sc->sc_queue[0].wmq_txq;
   5284 	nexttx = txq->txq_next;
   5285 	txd = &txq->txq_descs[nexttx];
   5286 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5287 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5288 	txd->wtx_fields.wtxu_status = 0;
   5289 	txd->wtx_fields.wtxu_options = 0;
   5290 	txd->wtx_fields.wtxu_vlan = 0;
   5291 
   5292 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5293 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5294 
   5295 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5296 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5297 	CSR_WRITE_FLUSH(sc);
   5298 	delay(250);
   5299 
   5300 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5301 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5302 		return;
   5303 
   5304 	/*
   5305 	 * Mark all descriptors in the RX ring as consumed and disable the
   5306 	 * rx ring.
   5307 	 */
   5308 #ifdef WM_DEBUG
   5309 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5310 #endif
   5311 	rctl = CSR_READ(sc, WMREG_RCTL);
   5312 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5313 	CSR_WRITE_FLUSH(sc);
   5314 	delay(150);
   5315 
   5316 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5317 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5318 	reg &= 0xffffc000;
   5319 	/*
   5320 	 * Update thresholds: prefetch threshold to 31, host threshold
   5321 	 * to 1 and make sure the granularity is "descriptors" and not
   5322 	 * "cache lines"
   5323 	 */
   5324 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5325 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5326 
   5327 	/* Momentarily enable the RX ring for the changes to take effect */
   5328 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5329 	CSR_WRITE_FLUSH(sc);
   5330 	delay(150);
   5331 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5332 }
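
/*
 * Worked example for the RXDCTL update above, assuming the usual e1000
 * register layout (PTHRESH in bits 5:0, HTHRESH in bits 13:8, WTHRESH in
 * bits 21:16): after the low 14 bits are cleared, "0x1f | (1 << 8)" sets
 * the prefetch threshold to 31 and the host threshold to 1 while leaving
 * the write-back threshold alone, and RXDCTL_GRAN selects descriptor
 * rather than cache-line granularity.
 */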
   5333 
   5334 /*
   5335  * wm_reset:
   5336  *
   5337  *	Reset the i82542 chip.
   5338  */
   5339 static void
   5340 wm_reset(struct wm_softc *sc)
   5341 {
   5342 	int phy_reset = 0;
   5343 	int i, error = 0;
   5344 	uint32_t reg;
   5345 	uint16_t kmreg;
   5346 	int rv;
   5347 
   5348 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5349 		device_xname(sc->sc_dev), __func__));
   5350 	KASSERT(sc->sc_type != 0);
   5351 
   5352 	/*
   5353 	 * Allocate on-chip memory according to the MTU size.
   5354 	 * The Packet Buffer Allocation register must be written
   5355 	 * before the chip is reset.
   5356 	 */
   5357 	switch (sc->sc_type) {
   5358 	case WM_T_82547:
   5359 	case WM_T_82547_2:
   5360 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5361 		    PBA_22K : PBA_30K;
   5362 		for (i = 0; i < sc->sc_nqueues; i++) {
   5363 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5364 			txq->txq_fifo_head = 0;
   5365 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5366 			txq->txq_fifo_size =
   5367 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5368 			txq->txq_fifo_stall = 0;
   5369 		}
   5370 		break;
   5371 	case WM_T_82571:
   5372 	case WM_T_82572:
   5373 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5374 	case WM_T_80003:
   5375 		sc->sc_pba = PBA_32K;
   5376 		break;
   5377 	case WM_T_82573:
   5378 		sc->sc_pba = PBA_12K;
   5379 		break;
   5380 	case WM_T_82574:
   5381 	case WM_T_82583:
   5382 		sc->sc_pba = PBA_20K;
   5383 		break;
   5384 	case WM_T_82576:
   5385 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5386 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5387 		break;
   5388 	case WM_T_82580:
   5389 	case WM_T_I350:
   5390 	case WM_T_I354:
   5391 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5392 		break;
   5393 	case WM_T_I210:
   5394 	case WM_T_I211:
   5395 		sc->sc_pba = PBA_34K;
   5396 		break;
   5397 	case WM_T_ICH8:
   5398 		/* Workaround for a bit corruption issue in FIFO memory */
   5399 		sc->sc_pba = PBA_8K;
   5400 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5401 		break;
   5402 	case WM_T_ICH9:
   5403 	case WM_T_ICH10:
   5404 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5405 		    PBA_14K : PBA_10K;
   5406 		break;
   5407 	case WM_T_PCH:
   5408 	case WM_T_PCH2:	/* XXX 14K? */
   5409 	case WM_T_PCH_LPT:
   5410 	case WM_T_PCH_SPT:
   5411 	case WM_T_PCH_CNP:
   5412 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5413 		    PBA_12K : PBA_26K;
   5414 		break;
   5415 	default:
   5416 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5417 		    PBA_40K : PBA_48K;
   5418 		break;
   5419 	}
   5420 	/*
   5421 	 * Only old or non-multiqueue devices have the PBA register
   5422 	 * XXX Need special handling for 82575.
   5423 	 */
   5424 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5425 	    || (sc->sc_type == WM_T_82575))
   5426 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5427 
   5428 	/* Prevent the PCI-E bus from sticking */
   5429 	if (sc->sc_flags & WM_F_PCIE) {
   5430 		int timeout = 800;
   5431 
   5432 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5433 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5434 
   5435 		while (timeout--) {
   5436 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5437 			    == 0)
   5438 				break;
   5439 			delay(100);
   5440 		}
   5441 		if (timeout == 0)
   5442 			device_printf(sc->sc_dev,
   5443 			    "failed to disable bus mastering\n");
   5444 	}
   5445 
   5446 	/* Set the completion timeout for interface */
   5447 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5448 	    || (sc->sc_type == WM_T_82580)
   5449 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5450 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5451 		wm_set_pcie_completion_timeout(sc);
   5452 
   5453 	/* Clear interrupt */
   5454 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5455 	if (wm_is_using_msix(sc)) {
   5456 		if (sc->sc_type != WM_T_82574) {
   5457 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5458 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5459 		} else
   5460 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5461 	}
   5462 
   5463 	/* Stop the transmit and receive processes. */
   5464 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5465 	sc->sc_rctl &= ~RCTL_EN;
   5466 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5467 	CSR_WRITE_FLUSH(sc);
   5468 
   5469 	/* XXX set_tbi_sbp_82543() */
   5470 
   5471 	delay(10*1000);
   5472 
   5473 	/* Must acquire the MDIO ownership before MAC reset */
   5474 	switch (sc->sc_type) {
   5475 	case WM_T_82573:
   5476 	case WM_T_82574:
   5477 	case WM_T_82583:
   5478 		error = wm_get_hw_semaphore_82573(sc);
   5479 		break;
   5480 	default:
   5481 		break;
   5482 	}
   5483 
   5484 	/*
   5485 	 * 82541 Errata 29? & 82547 Errata 28?
   5486 	 * See also the description about PHY_RST bit in CTRL register
   5487 	 * in 8254x_GBe_SDM.pdf.
   5488 	 */
   5489 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5490 		CSR_WRITE(sc, WMREG_CTRL,
   5491 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5492 		CSR_WRITE_FLUSH(sc);
   5493 		delay(5000);
   5494 	}
   5495 
   5496 	switch (sc->sc_type) {
   5497 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5498 	case WM_T_82541:
   5499 	case WM_T_82541_2:
   5500 	case WM_T_82547:
   5501 	case WM_T_82547_2:
   5502 		/*
   5503 		 * On some chipsets, a reset through a memory-mapped write
   5504 		 * cycle can cause the chip to reset before completing the
   5505 		 * write cycle. This causes major headaches that can be avoided
   5506 		 * by issuing the reset via indirect register writes through
   5507 		 * I/O space.
   5508 		 *
   5509 		 * So, if we successfully mapped the I/O BAR at attach time,
   5510 		 * use that. Otherwise, try our luck with a memory-mapped
   5511 		 * reset.
   5512 		 */
   5513 		if (sc->sc_flags & WM_F_IOH_VALID)
   5514 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5515 		else
   5516 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5517 		break;
   5518 	case WM_T_82545_3:
   5519 	case WM_T_82546_3:
   5520 		/* Use the shadow control register on these chips. */
   5521 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5522 		break;
   5523 	case WM_T_80003:
   5524 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5525 		sc->phy.acquire(sc);
   5526 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5527 		sc->phy.release(sc);
   5528 		break;
   5529 	case WM_T_ICH8:
   5530 	case WM_T_ICH9:
   5531 	case WM_T_ICH10:
   5532 	case WM_T_PCH:
   5533 	case WM_T_PCH2:
   5534 	case WM_T_PCH_LPT:
   5535 	case WM_T_PCH_SPT:
   5536 	case WM_T_PCH_CNP:
   5537 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5538 		if (wm_phy_resetisblocked(sc) == false) {
   5539 			/*
   5540 			 * Gate automatic PHY configuration by hardware on
   5541 			 * non-managed 82579
   5542 			 */
   5543 			if ((sc->sc_type == WM_T_PCH2)
   5544 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5545 				== 0))
   5546 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5547 
   5548 			reg |= CTRL_PHY_RESET;
   5549 			phy_reset = 1;
   5550 		} else
   5551 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5552 		sc->phy.acquire(sc);
   5553 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5554 		/* Don't insert a completion barrier during reset */
   5555 		delay(20*1000);
   5556 		mutex_exit(sc->sc_ich_phymtx);
   5557 		break;
   5558 	case WM_T_82580:
   5559 	case WM_T_I350:
   5560 	case WM_T_I354:
   5561 	case WM_T_I210:
   5562 	case WM_T_I211:
   5563 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5564 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5565 			CSR_WRITE_FLUSH(sc);
   5566 		delay(5000);
   5567 		break;
   5568 	case WM_T_82542_2_0:
   5569 	case WM_T_82542_2_1:
   5570 	case WM_T_82543:
   5571 	case WM_T_82540:
   5572 	case WM_T_82545:
   5573 	case WM_T_82546:
   5574 	case WM_T_82571:
   5575 	case WM_T_82572:
   5576 	case WM_T_82573:
   5577 	case WM_T_82574:
   5578 	case WM_T_82575:
   5579 	case WM_T_82576:
   5580 	case WM_T_82583:
   5581 	default:
   5582 		/* Everything else can safely use the documented method. */
   5583 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5584 		break;
   5585 	}
   5586 
   5587 	/* Must release the MDIO ownership after MAC reset */
   5588 	switch (sc->sc_type) {
   5589 	case WM_T_82573:
   5590 	case WM_T_82574:
   5591 	case WM_T_82583:
   5592 		if (error == 0)
   5593 			wm_put_hw_semaphore_82573(sc);
   5594 		break;
   5595 	default:
   5596 		break;
   5597 	}
   5598 
   5599 	/* Set Phy Config Counter to 50msec */
   5600 	if (sc->sc_type == WM_T_PCH2) {
   5601 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5602 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5603 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5604 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5605 	}
   5606 
   5607 	if (phy_reset != 0)
   5608 		wm_get_cfg_done(sc);
   5609 
   5610 	/* Reload EEPROM */
   5611 	switch (sc->sc_type) {
   5612 	case WM_T_82542_2_0:
   5613 	case WM_T_82542_2_1:
   5614 	case WM_T_82543:
   5615 	case WM_T_82544:
   5616 		delay(10);
   5617 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5618 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5619 		CSR_WRITE_FLUSH(sc);
   5620 		delay(2000);
   5621 		break;
   5622 	case WM_T_82540:
   5623 	case WM_T_82545:
   5624 	case WM_T_82545_3:
   5625 	case WM_T_82546:
   5626 	case WM_T_82546_3:
   5627 		delay(5*1000);
   5628 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5629 		break;
   5630 	case WM_T_82541:
   5631 	case WM_T_82541_2:
   5632 	case WM_T_82547:
   5633 	case WM_T_82547_2:
   5634 		delay(20000);
   5635 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5636 		break;
   5637 	case WM_T_82571:
   5638 	case WM_T_82572:
   5639 	case WM_T_82573:
   5640 	case WM_T_82574:
   5641 	case WM_T_82583:
   5642 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5643 			delay(10);
   5644 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5645 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5646 			CSR_WRITE_FLUSH(sc);
   5647 		}
   5648 		/* check EECD_EE_AUTORD */
   5649 		wm_get_auto_rd_done(sc);
   5650 		/*
   5651 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
   5652 		 * is set.
   5653 		 */
   5654 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5655 		    || (sc->sc_type == WM_T_82583))
   5656 			delay(25*1000);
   5657 		break;
   5658 	case WM_T_82575:
   5659 	case WM_T_82576:
   5660 	case WM_T_82580:
   5661 	case WM_T_I350:
   5662 	case WM_T_I354:
   5663 	case WM_T_I210:
   5664 	case WM_T_I211:
   5665 	case WM_T_80003:
   5666 		/* check EECD_EE_AUTORD */
   5667 		wm_get_auto_rd_done(sc);
   5668 		break;
   5669 	case WM_T_ICH8:
   5670 	case WM_T_ICH9:
   5671 	case WM_T_ICH10:
   5672 	case WM_T_PCH:
   5673 	case WM_T_PCH2:
   5674 	case WM_T_PCH_LPT:
   5675 	case WM_T_PCH_SPT:
   5676 	case WM_T_PCH_CNP:
   5677 		break;
   5678 	default:
   5679 		panic("%s: unknown type\n", __func__);
   5680 	}
   5681 
   5682 	/* Check whether EEPROM is present or not */
   5683 	switch (sc->sc_type) {
   5684 	case WM_T_82575:
   5685 	case WM_T_82576:
   5686 	case WM_T_82580:
   5687 	case WM_T_I350:
   5688 	case WM_T_I354:
   5689 	case WM_T_ICH8:
   5690 	case WM_T_ICH9:
   5691 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5692 			/* Not found */
   5693 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5694 			if (sc->sc_type == WM_T_82575)
   5695 				wm_reset_init_script_82575(sc);
   5696 		}
   5697 		break;
   5698 	default:
   5699 		break;
   5700 	}
   5701 
   5702 	if (phy_reset != 0)
   5703 		wm_phy_post_reset(sc);
   5704 
   5705 	if ((sc->sc_type == WM_T_82580)
   5706 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5707 		/* Clear global device reset status bit */
   5708 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5709 	}
   5710 
   5711 	/* Clear any pending interrupt events. */
   5712 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5713 	reg = CSR_READ(sc, WMREG_ICR);
   5714 	if (wm_is_using_msix(sc)) {
   5715 		if (sc->sc_type != WM_T_82574) {
   5716 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5717 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5718 		} else
   5719 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5720 	}
   5721 
   5722 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5723 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5724 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5725 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5726 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5727 		reg |= KABGTXD_BGSQLBIAS;
   5728 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5729 	}
   5730 
   5731 	/* Reload sc_ctrl */
   5732 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5733 
   5734 	wm_set_eee(sc);
   5735 
   5736 	/*
   5737 	 * For PCH, this write will make sure that any noise will be detected
   5738 	 * as a CRC error and be dropped rather than show up as a bad packet
   5739 	 * to the DMA engine.
   5740 	 */
   5741 	if (sc->sc_type == WM_T_PCH)
   5742 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5743 
   5744 	if (sc->sc_type >= WM_T_82544)
   5745 		CSR_WRITE(sc, WMREG_WUC, 0);
   5746 
   5747 	if (sc->sc_type < WM_T_82575)
   5748 		wm_disable_aspm(sc); /* Workaround for some chips */
   5749 
   5750 	wm_reset_mdicnfg_82580(sc);
   5751 
   5752 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5753 		wm_pll_workaround_i210(sc);
   5754 
   5755 	if (sc->sc_type == WM_T_80003) {
   5756 		/* Default to TRUE to enable the MDIC W/A */
   5757 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5758 
   5759 		rv = wm_kmrn_readreg(sc,
   5760 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5761 		if (rv == 0) {
   5762 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5763 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5764 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5765 			else
   5766 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5767 		}
   5768 	}
   5769 }
   5770 
   5771 /*
   5772  * wm_add_rxbuf:
   5773  *
   5774  *	Add a receive buffer to the indicated descriptor.
   5775  */
   5776 static int
   5777 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5778 {
   5779 	struct wm_softc *sc = rxq->rxq_sc;
   5780 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5781 	struct mbuf *m;
   5782 	int error;
   5783 
   5784 	KASSERT(mutex_owned(rxq->rxq_lock));
   5785 
   5786 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5787 	if (m == NULL)
   5788 		return ENOBUFS;
   5789 
   5790 	MCLGET(m, M_DONTWAIT);
   5791 	if ((m->m_flags & M_EXT) == 0) {
   5792 		m_freem(m);
   5793 		return ENOBUFS;
   5794 	}
   5795 
   5796 	if (rxs->rxs_mbuf != NULL)
   5797 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5798 
   5799 	rxs->rxs_mbuf = m;
   5800 
   5801 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5802 	/*
   5803 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5804 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5805 	 */
   5806 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5807 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5808 	if (error) {
   5809 		/* XXX XXX XXX */
   5810 		aprint_error_dev(sc->sc_dev,
   5811 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5812 		panic("wm_add_rxbuf");
   5813 	}
   5814 
   5815 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5816 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5817 
   5818 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5819 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5820 			wm_init_rxdesc(rxq, idx);
   5821 	} else
   5822 		wm_init_rxdesc(rxq, idx);
   5823 
   5824 	return 0;
   5825 }
   5826 
   5827 /*
   5828  * wm_rxdrain:
   5829  *
   5830  *	Drain the receive queue.
   5831  */
   5832 static void
   5833 wm_rxdrain(struct wm_rxqueue *rxq)
   5834 {
   5835 	struct wm_softc *sc = rxq->rxq_sc;
   5836 	struct wm_rxsoft *rxs;
   5837 	int i;
   5838 
   5839 	KASSERT(mutex_owned(rxq->rxq_lock));
   5840 
   5841 	for (i = 0; i < WM_NRXDESC; i++) {
   5842 		rxs = &rxq->rxq_soft[i];
   5843 		if (rxs->rxs_mbuf != NULL) {
   5844 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5845 			m_freem(rxs->rxs_mbuf);
   5846 			rxs->rxs_mbuf = NULL;
   5847 		}
   5848 	}
   5849 }
   5850 
   5851 /*
   5852  * Setup registers for RSS.
   5853  *
   5854  * XXX not yet VMDq support
   5855  */
   5856 static void
   5857 wm_init_rss(struct wm_softc *sc)
   5858 {
   5859 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5860 	int i;
   5861 
   5862 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5863 
   5864 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5865 		unsigned int qid, reta_ent;
   5866 
   5867 		qid  = i % sc->sc_nqueues;
   5868 		switch (sc->sc_type) {
   5869 		case WM_T_82574:
   5870 			reta_ent = __SHIFTIN(qid,
   5871 			    RETA_ENT_QINDEX_MASK_82574);
   5872 			break;
   5873 		case WM_T_82575:
   5874 			reta_ent = __SHIFTIN(qid,
   5875 			    RETA_ENT_QINDEX1_MASK_82575);
   5876 			break;
   5877 		default:
   5878 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5879 			break;
   5880 		}
   5881 
   5882 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5883 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5884 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5885 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5886 	}
   5887 
   5888 	rss_getkey((uint8_t *)rss_key);
   5889 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5890 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5891 
   5892 	if (sc->sc_type == WM_T_82574)
   5893 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5894 	else
   5895 		mrqc = MRQC_ENABLE_RSS_MQ;
   5896 
   5897 	/*
   5898 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5899 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5900 	 */
   5901 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5902 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5903 #if 0
   5904 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5905 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5906 #endif
   5907 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5908 
   5909 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5910 }
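
/*
 * Editorial illustration of the round-robin RETA fill above: with
 * sc_nqueues == 4 the indirection table reads
 *
 *	entry:	0  1  2  3  4  5  6  7 ...
 *	queue:	0  1  2  3  0  1  2  3 ...
 *
 * The low bits of a received packet's 32-bit RSS hash select an entry and
 * therefore the destination queue, so flows spread evenly across the
 * active queues.
 */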
   5911 
   5912 /*
   5913  * Adjust the TX and RX queue numbers which the system actually uses.
   5914  *
   5915  * The numbers are affected by the parameters below.
   5916  *     - The number of hardware queues
   5917  *     - The number of MSI-X vectors (= "nvectors" argument)
   5918  *     - ncpu
   5919  */
   5920 static void
   5921 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5922 {
   5923 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5924 
   5925 	if (nvectors < 2) {
   5926 		sc->sc_nqueues = 1;
   5927 		return;
   5928 	}
   5929 
   5930 	switch (sc->sc_type) {
   5931 	case WM_T_82572:
   5932 		hw_ntxqueues = 2;
   5933 		hw_nrxqueues = 2;
   5934 		break;
   5935 	case WM_T_82574:
   5936 		hw_ntxqueues = 2;
   5937 		hw_nrxqueues = 2;
   5938 		break;
   5939 	case WM_T_82575:
   5940 		hw_ntxqueues = 4;
   5941 		hw_nrxqueues = 4;
   5942 		break;
   5943 	case WM_T_82576:
   5944 		hw_ntxqueues = 16;
   5945 		hw_nrxqueues = 16;
   5946 		break;
   5947 	case WM_T_82580:
   5948 	case WM_T_I350:
   5949 	case WM_T_I354:
   5950 		hw_ntxqueues = 8;
   5951 		hw_nrxqueues = 8;
   5952 		break;
   5953 	case WM_T_I210:
   5954 		hw_ntxqueues = 4;
   5955 		hw_nrxqueues = 4;
   5956 		break;
   5957 	case WM_T_I211:
   5958 		hw_ntxqueues = 2;
   5959 		hw_nrxqueues = 2;
   5960 		break;
   5961 		/*
    5962 		 * The Ethernet controllers below do not support MSI-X,
    5963 		 * so this driver doesn't let them use multiqueue.
   5964 		 *     - WM_T_80003
   5965 		 *     - WM_T_ICH8
   5966 		 *     - WM_T_ICH9
   5967 		 *     - WM_T_ICH10
   5968 		 *     - WM_T_PCH
   5969 		 *     - WM_T_PCH2
   5970 		 *     - WM_T_PCH_LPT
   5971 		 */
   5972 	default:
   5973 		hw_ntxqueues = 1;
   5974 		hw_nrxqueues = 1;
   5975 		break;
   5976 	}
   5977 
   5978 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5979 
   5980 	/*
    5981 	 * Since more queues than MSI-X vectors cannot improve scaling,
    5982 	 * we limit the number of queues actually used.
   5983 	 */
   5984 	if (nvectors < hw_nqueues + 1)
   5985 		sc->sc_nqueues = nvectors - 1;
   5986 	else
   5987 		sc->sc_nqueues = hw_nqueues;
   5988 
   5989 	/*
    5990 	 * Likewise, since more queues than CPUs cannot improve scaling,
    5991 	 * we limit the number of queues actually used.
   5992 	 */
   5993 	if (ncpu < sc->sc_nqueues)
   5994 		sc->sc_nqueues = ncpu;
   5995 }
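
/*
 * Worked example for wm_adjust_qnum() (illustrative only): on an
 * 82576 (16 hardware TX/RX queues) probed with nvectors == 5 on an
 * 8-CPU machine, one vector is reserved for the link interrupt, so
 * sc_nqueues = min(16, 5 - 1, 8) = 4.
 */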
   5996 
   5997 static inline bool
   5998 wm_is_using_msix(struct wm_softc *sc)
   5999 {
   6000 
   6001 	return (sc->sc_nintrs > 1);
   6002 }
   6003 
   6004 static inline bool
   6005 wm_is_using_multiqueue(struct wm_softc *sc)
   6006 {
   6007 
   6008 	return (sc->sc_nqueues > 1);
   6009 }
   6010 
   6011 static int
   6012 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6013 {
   6014 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6015 
   6016 	wmq->wmq_id = qidx;
   6017 	wmq->wmq_intr_idx = intr_idx;
   6018 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   6019 	    wm_handle_queue, wmq);
   6020 	if (wmq->wmq_si != NULL)
   6021 		return 0;
   6022 
   6023 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6024 	    wmq->wmq_id);
   6025 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6026 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6027 	return ENOMEM;
   6028 }
   6029 
   6030 /*
    6031  * Both single-interrupt MSI and INTx can use this function.
   6032  */
   6033 static int
   6034 wm_setup_legacy(struct wm_softc *sc)
   6035 {
   6036 	pci_chipset_tag_t pc = sc->sc_pc;
   6037 	const char *intrstr = NULL;
   6038 	char intrbuf[PCI_INTRSTR_LEN];
   6039 	int error;
   6040 
   6041 	error = wm_alloc_txrx_queues(sc);
   6042 	if (error) {
   6043 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6044 		    error);
   6045 		return ENOMEM;
   6046 	}
   6047 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6048 	    sizeof(intrbuf));
   6049 #ifdef WM_MPSAFE
   6050 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6051 #endif
   6052 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6053 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6054 	if (sc->sc_ihs[0] == NULL) {
   6055 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6056 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6057 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6058 		return ENOMEM;
   6059 	}
   6060 
   6061 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6062 	sc->sc_nintrs = 1;
   6063 
   6064 	return wm_softint_establish_queue(sc, 0, 0);
   6065 }
   6066 
   6067 static int
   6068 wm_setup_msix(struct wm_softc *sc)
   6069 {
   6070 	void *vih;
   6071 	kcpuset_t *affinity;
   6072 	int qidx, error, intr_idx, txrx_established;
   6073 	pci_chipset_tag_t pc = sc->sc_pc;
   6074 	const char *intrstr = NULL;
   6075 	char intrbuf[PCI_INTRSTR_LEN];
   6076 	char intr_xname[INTRDEVNAMEBUF];
   6077 
   6078 	if (sc->sc_nqueues < ncpu) {
   6079 		/*
    6080 		 * To avoid contending with other devices' interrupts, the
    6081 		 * Tx/Rx interrupt affinities start at CPU#1.
   6082 		 */
   6083 		sc->sc_affinity_offset = 1;
   6084 	} else {
   6085 		/*
    6086 		 * In this case the device uses all CPUs, so for readability
    6087 		 * we make the affinitized cpu_index match the MSI-X vector
		 * number.
   6088 		 */
   6089 		sc->sc_affinity_offset = 0;
   6090 	}
   6091 
   6092 	error = wm_alloc_txrx_queues(sc);
   6093 	if (error) {
   6094 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6095 		    error);
   6096 		return ENOMEM;
   6097 	}
   6098 
   6099 	kcpuset_create(&affinity, false);
   6100 	intr_idx = 0;
   6101 
   6102 	/*
   6103 	 * TX and RX
   6104 	 */
   6105 	txrx_established = 0;
   6106 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6107 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6108 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6109 
   6110 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6111 		    sizeof(intrbuf));
   6112 #ifdef WM_MPSAFE
   6113 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6114 		    PCI_INTR_MPSAFE, true);
   6115 #endif
   6116 		memset(intr_xname, 0, sizeof(intr_xname));
   6117 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6118 		    device_xname(sc->sc_dev), qidx);
   6119 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6120 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6121 		if (vih == NULL) {
   6122 			aprint_error_dev(sc->sc_dev,
   6123 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6124 			    intrstr ? " at " : "",
   6125 			    intrstr ? intrstr : "");
   6126 
   6127 			goto fail;
   6128 		}
   6129 		kcpuset_zero(affinity);
   6130 		/* Round-robin affinity */
   6131 		kcpuset_set(affinity, affinity_to);
   6132 		error = interrupt_distribute(vih, affinity, NULL);
   6133 		if (error == 0) {
   6134 			aprint_normal_dev(sc->sc_dev,
   6135 			    "for TX and RX interrupting at %s affinity to %u\n",
   6136 			    intrstr, affinity_to);
   6137 		} else {
   6138 			aprint_normal_dev(sc->sc_dev,
   6139 			    "for TX and RX interrupting at %s\n", intrstr);
   6140 		}
   6141 		sc->sc_ihs[intr_idx] = vih;
   6142 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6143 			goto fail;
   6144 		txrx_established++;
   6145 		intr_idx++;
   6146 	}
   6147 
   6148 	/* LINK */
   6149 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6150 	    sizeof(intrbuf));
   6151 #ifdef WM_MPSAFE
   6152 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6153 #endif
   6154 	memset(intr_xname, 0, sizeof(intr_xname));
   6155 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6156 	    device_xname(sc->sc_dev));
   6157 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6158 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6159 	if (vih == NULL) {
   6160 		aprint_error_dev(sc->sc_dev,
   6161 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6162 		    intrstr ? " at " : "",
   6163 		    intrstr ? intrstr : "");
   6164 
   6165 		goto fail;
   6166 	}
   6167 	/* Keep default affinity to LINK interrupt */
   6168 	aprint_normal_dev(sc->sc_dev,
   6169 	    "for LINK interrupting at %s\n", intrstr);
   6170 	sc->sc_ihs[intr_idx] = vih;
   6171 	sc->sc_link_intr_idx = intr_idx;
   6172 
   6173 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6174 	kcpuset_destroy(affinity);
   6175 	return 0;
   6176 
   6177  fail:
   6178 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6179 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6180 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6181 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6182 	}
   6183 
   6184 	kcpuset_destroy(affinity);
   6185 	return ENOMEM;
   6186 }
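
/*
 * Resulting MSI-X vector layout (an illustrative sketch, assuming the
 * device is wm0, sc_nqueues == 2 and sc_affinity_offset == 1):
 * vector 0 is "wm0TXRX0" with affinity to CPU#1, vector 1 is
 * "wm0TXRX1" with affinity to CPU#2, and vector 2 is "wm0LINK" with
 * the default affinity; sc_nintrs ends up as 3.
 */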
   6187 
   6188 static void
   6189 wm_unset_stopping_flags(struct wm_softc *sc)
   6190 {
   6191 	int i;
   6192 
   6193 	KASSERT(WM_CORE_LOCKED(sc));
   6194 
   6195 	/* Must unset stopping flags in ascending order. */
   6196 	for (i = 0; i < sc->sc_nqueues; i++) {
   6197 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6198 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6199 
   6200 		mutex_enter(txq->txq_lock);
   6201 		txq->txq_stopping = false;
   6202 		mutex_exit(txq->txq_lock);
   6203 
   6204 		mutex_enter(rxq->rxq_lock);
   6205 		rxq->rxq_stopping = false;
   6206 		mutex_exit(rxq->rxq_lock);
   6207 	}
   6208 
   6209 	sc->sc_core_stopping = false;
   6210 }
   6211 
   6212 static void
   6213 wm_set_stopping_flags(struct wm_softc *sc)
   6214 {
   6215 	int i;
   6216 
   6217 	KASSERT(WM_CORE_LOCKED(sc));
   6218 
   6219 	sc->sc_core_stopping = true;
   6220 
   6221 	/* Must set stopping flags in ascending order. */
   6222 	for (i = 0; i < sc->sc_nqueues; i++) {
   6223 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6224 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6225 
   6226 		mutex_enter(rxq->rxq_lock);
   6227 		rxq->rxq_stopping = true;
   6228 		mutex_exit(rxq->rxq_lock);
   6229 
   6230 		mutex_enter(txq->txq_lock);
   6231 		txq->txq_stopping = true;
   6232 		mutex_exit(txq->txq_lock);
   6233 	}
   6234 }
   6235 
   6236 /*
   6237  * Write interrupt interval value to ITR or EITR
   6238  */
   6239 static void
   6240 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6241 {
   6242 
   6243 	if (!wmq->wmq_set_itr)
   6244 		return;
   6245 
   6246 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6247 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6248 
   6249 		/*
    6250 		 * The 82575 doesn't have the CNT_INGR field,
    6251 		 * so overwrite the counter field in software.
   6252 		 */
   6253 		if (sc->sc_type == WM_T_82575)
   6254 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   6255 		else
   6256 			eitr |= EITR_CNT_INGR;
   6257 
   6258 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6259 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6260 		/*
    6261 		 * The 82574 has both ITR and EITR.  Set EITR when we use
    6262 		 * the multiqueue function with MSI-X.
   6263 		 */
   6264 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6265 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6266 	} else {
   6267 		KASSERT(wmq->wmq_id == 0);
   6268 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6269 	}
   6270 
   6271 	wmq->wmq_set_itr = false;
   6272 }
   6273 
   6274 /*
   6275  * TODO
    6276  * The dynamic ITR calculation below is almost the same as Linux igb's,
    6277  * but it does not fit wm(4) well, so AIM remains disabled until we
    6278  * find an appropriate ITR calculation.
   6279  */
   6280 /*
    6281  * Calculate the interrupt interval value that wm_itrs_writereg() will
    6282  * write to the register.  This function does not write ITR/EITR itself.
   6283  */
   6284 static void
   6285 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6286 {
   6287 #ifdef NOTYET
   6288 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6289 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6290 	uint32_t avg_size = 0;
   6291 	uint32_t new_itr;
   6292 
   6293 	if (rxq->rxq_packets)
   6294 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6295 	if (txq->txq_packets)
   6296 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6297 
   6298 	if (avg_size == 0) {
   6299 		new_itr = 450; /* restore default value */
   6300 		goto out;
   6301 	}
   6302 
   6303 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6304 	avg_size += 24;
   6305 
   6306 	/* Don't starve jumbo frames */
   6307 	avg_size = uimin(avg_size, 3000);
   6308 
   6309 	/* Give a little boost to mid-size frames */
   6310 	if ((avg_size > 300) && (avg_size < 1200))
   6311 		new_itr = avg_size / 3;
   6312 	else
   6313 		new_itr = avg_size / 2;
   6314 
   6315 out:
   6316 	/*
    6317 	 * The 82574 and 82575 use EITR differently from other NEWQUEUE
    6318 	 * controllers.  See the sc->sc_itr_init setting in wm_init_locked().
   6319 	 */
   6320 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6321 		new_itr *= 4;
   6322 
   6323 	if (new_itr != wmq->wmq_itr) {
   6324 		wmq->wmq_itr = new_itr;
   6325 		wmq->wmq_set_itr = true;
   6326 	} else
   6327 		wmq->wmq_set_itr = false;
   6328 
   6329 	rxq->rxq_packets = 0;
   6330 	rxq->rxq_bytes = 0;
   6331 	txq->txq_packets = 0;
   6332 	txq->txq_bytes = 0;
   6333 #endif
   6334 }
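
/*
 * Worked example of the disabled AIM heuristic above (a sketch): for
 * an average packet of 1000 bytes, avg_size becomes 1024 after the
 * 24-byte CRC/preamble/gap adjustment; since 300 < 1024 < 1200,
 * new_itr = 1024 / 3 = 341, which is then scaled by 4 for every
 * controller except the 82575, giving 1364.
 */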
   6335 
   6336 static void
   6337 wm_init_sysctls(struct wm_softc *sc)
   6338 {
   6339 	struct sysctllog **log;
   6340 	const struct sysctlnode *rnode, *qnode, *cnode;
   6341 	int i, rv;
   6342 	const char *dvname;
   6343 
   6344 	log = &sc->sc_sysctllog;
   6345 	dvname = device_xname(sc->sc_dev);
   6346 
   6347 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6348 	    0, CTLTYPE_NODE, dvname,
   6349 	    SYSCTL_DESCR("wm information and settings"),
   6350 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6351 	if (rv != 0)
   6352 		goto err;
   6353 
   6354 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
    6355 	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6356 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6357 	if (rv != 0)
   6358 		goto teardown;
   6359 
   6360 	for (i = 0; i < sc->sc_nqueues; i++) {
   6361 		struct wm_queue *wmq = &sc->sc_queue[i];
   6362 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6363 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6364 
   6365 		snprintf(sc->sc_queue[i].sysctlname,
   6366 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6367 
   6368 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6369 		    0, CTLTYPE_NODE,
   6370 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6371 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6372 			break;
   6373 
   6374 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6375 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6376 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6377 		    NULL, 0, &txq->txq_free,
   6378 		    0, CTL_CREATE, CTL_EOL) != 0)
   6379 			break;
   6380 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6381 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6382 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6383 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6384 		    0, CTL_CREATE, CTL_EOL) != 0)
   6385 			break;
   6386 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6387 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6388 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6389 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6390 		    0, CTL_CREATE, CTL_EOL) != 0)
   6391 			break;
   6392 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6393 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6394 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6395 		    NULL, 0, &txq->txq_next,
   6396 		    0, CTL_CREATE, CTL_EOL) != 0)
   6397 			break;
   6398 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6399 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6400 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6401 		    NULL, 0, &txq->txq_sfree,
   6402 		    0, CTL_CREATE, CTL_EOL) != 0)
   6403 			break;
   6404 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6405 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6406 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6407 		    NULL, 0, &txq->txq_snext,
   6408 		    0, CTL_CREATE, CTL_EOL) != 0)
   6409 			break;
   6410 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6411 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6412 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6413 		    NULL, 0, &txq->txq_sdirty,
   6414 		    0, CTL_CREATE, CTL_EOL) != 0)
   6415 			break;
   6416 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6417 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6418 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6419 		    NULL, 0, &txq->txq_flags,
   6420 		    0, CTL_CREATE, CTL_EOL) != 0)
   6421 			break;
   6422 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6423 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6424 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6425 		    NULL, 0, &txq->txq_stopping,
   6426 		    0, CTL_CREATE, CTL_EOL) != 0)
   6427 			break;
   6428 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6429 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6430 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6431 		    NULL, 0, &txq->txq_sending,
   6432 		    0, CTL_CREATE, CTL_EOL) != 0)
   6433 			break;
   6434 
   6435 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6436 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6437 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6438 		    NULL, 0, &rxq->rxq_ptr,
   6439 		    0, CTL_CREATE, CTL_EOL) != 0)
   6440 			break;
   6441 	}
   6442 
   6443 #ifdef WM_DEBUG
   6444 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6445 	    CTLTYPE_INT, "debug_flags",
   6446 	    SYSCTL_DESCR(
   6447 		    "Debug flags:\n"	\
   6448 		    "\t0x01 LINK\n"	\
   6449 		    "\t0x02 TX\n"	\
   6450 		    "\t0x04 RX\n"	\
   6451 		    "\t0x08 GMII\n"	\
   6452 		    "\t0x10 MANAGE\n"	\
   6453 		    "\t0x20 NVM\n"	\
   6454 		    "\t0x40 INIT\n"	\
   6455 		    "\t0x80 LOCK"),
   6456 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6457 	if (rv != 0)
   6458 		goto teardown;
   6459 #endif
   6460 
   6461 	return;
   6462 
   6463 teardown:
   6464 	sysctl_teardown(log);
   6465 err:
   6466 	sc->sc_sysctllog = NULL;
   6467 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6468 	    __func__, rv);
   6469 }
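
/*
 * Example of the resulting sysctl tree (illustrative; names assume
 * the device is wm0):
 *
 *	sysctl hw.wm0.txrx_workqueue	# bool, read-write
 *	sysctl hw.wm0.q0.txq_free	# per-queue read-only counters
 *	sysctl hw.wm0.q0.rxq_ptr
 */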
   6470 
   6471 /*
   6472  * wm_init:		[ifnet interface function]
   6473  *
   6474  *	Initialize the interface.
   6475  */
   6476 static int
   6477 wm_init(struct ifnet *ifp)
   6478 {
   6479 	struct wm_softc *sc = ifp->if_softc;
   6480 	int ret;
   6481 
   6482 	KASSERT(IFNET_LOCKED(ifp));
   6483 
   6484 	WM_CORE_LOCK(sc);
   6485 	ret = wm_init_locked(ifp);
   6486 	WM_CORE_UNLOCK(sc);
   6487 
   6488 	return ret;
   6489 }
   6490 
   6491 static int
   6492 wm_init_locked(struct ifnet *ifp)
   6493 {
   6494 	struct wm_softc *sc = ifp->if_softc;
   6495 	struct ethercom *ec = &sc->sc_ethercom;
   6496 	int i, j, trynum, error = 0;
   6497 	uint32_t reg, sfp_mask = 0;
   6498 
   6499 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6500 		device_xname(sc->sc_dev), __func__));
   6501 	KASSERT(IFNET_LOCKED(ifp));
   6502 	KASSERT(WM_CORE_LOCKED(sc));
   6503 
   6504 	/*
    6505 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6506 	 * There is a small but measurable benefit to avoiding the adjustment
   6507 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6508 	 * on such platforms.  One possibility is that the DMA itself is
   6509 	 * slightly more efficient if the front of the entire packet (instead
   6510 	 * of the front of the headers) is aligned.
   6511 	 *
   6512 	 * Note we must always set align_tweak to 0 if we are using
   6513 	 * jumbo frames.
   6514 	 */
   6515 #ifdef __NO_STRICT_ALIGNMENT
   6516 	sc->sc_align_tweak = 0;
   6517 #else
   6518 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6519 		sc->sc_align_tweak = 0;
   6520 	else
   6521 		sc->sc_align_tweak = 2;
   6522 #endif /* __NO_STRICT_ALIGNMENT */
   6523 
   6524 	/* Cancel any pending I/O. */
   6525 	wm_stop_locked(ifp, false, false);
   6526 
   6527 	/* Update statistics before reset */
   6528 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6529 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6530 
   6531 	/* >= PCH_SPT hardware workaround before reset. */
   6532 	if (sc->sc_type >= WM_T_PCH_SPT)
   6533 		wm_flush_desc_rings(sc);
   6534 
   6535 	/* Reset the chip to a known state. */
   6536 	wm_reset(sc);
   6537 
   6538 	/*
    6539 	 * AMT-based hardware can now take control from firmware.
    6540 	 * Do this after reset.
   6541 	 */
   6542 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6543 		wm_get_hw_control(sc);
   6544 
   6545 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6546 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6547 		wm_legacy_irq_quirk_spt(sc);
   6548 
   6549 	/* Init hardware bits */
   6550 	wm_initialize_hardware_bits(sc);
   6551 
   6552 	/* Reset the PHY. */
   6553 	if (sc->sc_flags & WM_F_HAS_MII)
   6554 		wm_gmii_reset(sc);
   6555 
   6556 	if (sc->sc_type >= WM_T_ICH8) {
   6557 		reg = CSR_READ(sc, WMREG_GCR);
   6558 		/*
   6559 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6560 		 * default after reset.
   6561 		 */
   6562 		if (sc->sc_type == WM_T_ICH8)
   6563 			reg |= GCR_NO_SNOOP_ALL;
   6564 		else
   6565 			reg &= ~GCR_NO_SNOOP_ALL;
   6566 		CSR_WRITE(sc, WMREG_GCR, reg);
   6567 	}
   6568 
   6569 	if ((sc->sc_type >= WM_T_ICH8)
   6570 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6571 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6572 
   6573 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6574 		reg |= CTRL_EXT_RO_DIS;
   6575 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6576 	}
   6577 
   6578 	/* Calculate (E)ITR value */
   6579 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6580 		/*
    6581 		 * For NEWQUEUE's EITR (except for the 82575).
    6582 		 * The 82575's EITR should be set to the same throttling value
    6583 		 * as other old controllers' ITR because the interrupts/sec
    6584 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6585 		 *
    6586 		 * The 82574's EITR should be set to the same throttling value
    6587 		 * as ITR.
    6588 		 *
    6589 		 * For N interrupts/sec, set this value to 1,000,000 / N
		 * (in contrast to the ITR throttling value).
   6590 		 */
   6591 		sc->sc_itr_init = 450;
   6592 	} else if (sc->sc_type >= WM_T_82543) {
   6593 		/*
    6594 		 * Set up the interrupt throttling register (units of 256ns).
   6595 		 * Note that a footnote in Intel's documentation says this
   6596 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6597 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6598 		 * that that is also true for the 1024ns units of the other
   6599 		 * interrupt-related timer registers -- so, really, we ought
   6600 		 * to divide this value by 4 when the link speed is low.
   6601 		 *
   6602 		 * XXX implement this division at link speed change!
   6603 		 */
   6604 
   6605 		/*
   6606 		 * For N interrupts/sec, set this value to:
   6607 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6608 		 * absolute and packet timer values to this value
   6609 		 * divided by 4 to get "simple timer" behavior.
   6610 		 */
   6611 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6612 	}
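
	/*
	 * Worked example (informational): the NEWQUEUE default of
	 * sc_itr_init = 450 yields about 1,000,000 / 450 ~= 2222
	 * interrupts/sec, while the legacy ITR value of 1500 yields
	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec.
	 */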
   6613 
   6614 	error = wm_init_txrx_queues(sc);
   6615 	if (error)
   6616 		goto out;
   6617 
   6618 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6619 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6620 	    (sc->sc_type >= WM_T_82575))
   6621 		wm_serdes_power_up_link_82575(sc);
   6622 
   6623 	/* Clear out the VLAN table -- we don't use it (yet). */
   6624 	CSR_WRITE(sc, WMREG_VET, 0);
   6625 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6626 		trynum = 10; /* Due to hw errata */
   6627 	else
   6628 		trynum = 1;
   6629 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6630 		for (j = 0; j < trynum; j++)
   6631 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6632 
   6633 	/*
   6634 	 * Set up flow-control parameters.
   6635 	 *
   6636 	 * XXX Values could probably stand some tuning.
   6637 	 */
   6638 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6639 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6640 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6641 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6642 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6643 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6644 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6645 	}
   6646 
   6647 	sc->sc_fcrtl = FCRTL_DFLT;
   6648 	if (sc->sc_type < WM_T_82543) {
   6649 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6650 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6651 	} else {
   6652 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6653 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6654 	}
   6655 
   6656 	if (sc->sc_type == WM_T_80003)
   6657 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6658 	else
   6659 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6660 
   6661 	/* Writes the control register. */
   6662 	wm_set_vlan(sc);
   6663 
   6664 	if (sc->sc_flags & WM_F_HAS_MII) {
   6665 		uint16_t kmreg;
   6666 
   6667 		switch (sc->sc_type) {
   6668 		case WM_T_80003:
   6669 		case WM_T_ICH8:
   6670 		case WM_T_ICH9:
   6671 		case WM_T_ICH10:
   6672 		case WM_T_PCH:
   6673 		case WM_T_PCH2:
   6674 		case WM_T_PCH_LPT:
   6675 		case WM_T_PCH_SPT:
   6676 		case WM_T_PCH_CNP:
   6677 			/*
   6678 			 * Set the mac to wait the maximum time between each
   6679 			 * iteration and increase the max iterations when
   6680 			 * polling the phy; this fixes erroneous timeouts at
   6681 			 * 10Mbps.
   6682 			 */
   6683 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6684 			    0xFFFF);
   6685 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6686 			    &kmreg);
   6687 			kmreg |= 0x3F;
   6688 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6689 			    kmreg);
   6690 			break;
   6691 		default:
   6692 			break;
   6693 		}
   6694 
   6695 		if (sc->sc_type == WM_T_80003) {
   6696 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6697 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6698 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6699 
   6700 			/* Bypass RX and TX FIFOs */
   6701 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6702 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6703 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6704 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6705 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6706 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6707 		}
   6708 	}
   6709 #if 0
   6710 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6711 #endif
   6712 
   6713 	/* Set up checksum offload parameters. */
   6714 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6715 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6716 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6717 		reg |= RXCSUM_IPOFL;
   6718 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6719 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6720 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6721 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6722 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6723 
   6724 	/* Set registers about MSI-X */
   6725 	if (wm_is_using_msix(sc)) {
   6726 		uint32_t ivar, qintr_idx;
   6727 		struct wm_queue *wmq;
   6728 		unsigned int qid;
   6729 
   6730 		if (sc->sc_type == WM_T_82575) {
   6731 			/* Interrupt control */
   6732 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6733 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6734 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6735 
   6736 			/* TX and RX */
   6737 			for (i = 0; i < sc->sc_nqueues; i++) {
   6738 				wmq = &sc->sc_queue[i];
   6739 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6740 				    EITR_TX_QUEUE(wmq->wmq_id)
   6741 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6742 			}
   6743 			/* Link status */
   6744 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6745 			    EITR_OTHER);
   6746 		} else if (sc->sc_type == WM_T_82574) {
   6747 			/* Interrupt control */
   6748 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6749 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6750 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6751 
   6752 			/*
   6753 			 * Work around issue with spurious interrupts
   6754 			 * in MSI-X mode.
    6755 			 * At wm_initialize_hardware_bits(), sc_nintrs has
    6756 			 * not been initialized yet, so re-set WMREG_RFCTL
			 * here.
   6757 			 */
   6758 			reg = CSR_READ(sc, WMREG_RFCTL);
   6759 			reg |= WMREG_RFCTL_ACKDIS;
   6760 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6761 
   6762 			ivar = 0;
   6763 			/* TX and RX */
   6764 			for (i = 0; i < sc->sc_nqueues; i++) {
   6765 				wmq = &sc->sc_queue[i];
   6766 				qid = wmq->wmq_id;
   6767 				qintr_idx = wmq->wmq_intr_idx;
   6768 
   6769 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6770 				    IVAR_TX_MASK_Q_82574(qid));
   6771 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6772 				    IVAR_RX_MASK_Q_82574(qid));
   6773 			}
   6774 			/* Link status */
   6775 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6776 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6777 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6778 		} else {
   6779 			/* Interrupt control */
   6780 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6781 			    | GPIE_EIAME | GPIE_PBA);
   6782 
   6783 			switch (sc->sc_type) {
   6784 			case WM_T_82580:
   6785 			case WM_T_I350:
   6786 			case WM_T_I354:
   6787 			case WM_T_I210:
   6788 			case WM_T_I211:
   6789 				/* TX and RX */
   6790 				for (i = 0; i < sc->sc_nqueues; i++) {
   6791 					wmq = &sc->sc_queue[i];
   6792 					qid = wmq->wmq_id;
   6793 					qintr_idx = wmq->wmq_intr_idx;
   6794 
   6795 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6796 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6797 					ivar |= __SHIFTIN((qintr_idx
   6798 						| IVAR_VALID),
   6799 					    IVAR_TX_MASK_Q(qid));
   6800 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6801 					ivar |= __SHIFTIN((qintr_idx
   6802 						| IVAR_VALID),
   6803 					    IVAR_RX_MASK_Q(qid));
   6804 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6805 				}
   6806 				break;
   6807 			case WM_T_82576:
   6808 				/* TX and RX */
   6809 				for (i = 0; i < sc->sc_nqueues; i++) {
   6810 					wmq = &sc->sc_queue[i];
   6811 					qid = wmq->wmq_id;
   6812 					qintr_idx = wmq->wmq_intr_idx;
   6813 
   6814 					ivar = CSR_READ(sc,
   6815 					    WMREG_IVAR_Q_82576(qid));
   6816 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6817 					ivar |= __SHIFTIN((qintr_idx
   6818 						| IVAR_VALID),
   6819 					    IVAR_TX_MASK_Q_82576(qid));
   6820 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6821 					ivar |= __SHIFTIN((qintr_idx
   6822 						| IVAR_VALID),
   6823 					    IVAR_RX_MASK_Q_82576(qid));
   6824 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6825 					    ivar);
   6826 				}
   6827 				break;
   6828 			default:
   6829 				break;
   6830 			}
   6831 
   6832 			/* Link status */
   6833 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6834 			    IVAR_MISC_OTHER);
   6835 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6836 		}
   6837 
   6838 		if (wm_is_using_multiqueue(sc)) {
   6839 			wm_init_rss(sc);
   6840 
   6841 			/*
    6842 			 * NOTE: Receive Full-Packet Checksum Offload is
    6843 			 * mutually exclusive with multiqueue.  However, this
    6844 			 * is not the same as TCP/IP checksum offload, which
    6845 			 * still works.
    6846 			 */
   6847 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6848 			reg |= RXCSUM_PCSD;
   6849 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6850 		}
   6851 	}
   6852 
   6853 	/* Set up the interrupt registers. */
   6854 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6855 
   6856 	/* Enable SFP module insertion interrupt if it's required */
   6857 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6858 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6859 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6860 		sfp_mask = ICR_GPI(0);
   6861 	}
   6862 
   6863 	if (wm_is_using_msix(sc)) {
   6864 		uint32_t mask;
   6865 		struct wm_queue *wmq;
   6866 
   6867 		switch (sc->sc_type) {
   6868 		case WM_T_82574:
   6869 			mask = 0;
   6870 			for (i = 0; i < sc->sc_nqueues; i++) {
   6871 				wmq = &sc->sc_queue[i];
   6872 				mask |= ICR_TXQ(wmq->wmq_id);
   6873 				mask |= ICR_RXQ(wmq->wmq_id);
   6874 			}
   6875 			mask |= ICR_OTHER;
   6876 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6877 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6878 			break;
   6879 		default:
   6880 			if (sc->sc_type == WM_T_82575) {
   6881 				mask = 0;
   6882 				for (i = 0; i < sc->sc_nqueues; i++) {
   6883 					wmq = &sc->sc_queue[i];
   6884 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6885 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6886 				}
   6887 				mask |= EITR_OTHER;
   6888 			} else {
   6889 				mask = 0;
   6890 				for (i = 0; i < sc->sc_nqueues; i++) {
   6891 					wmq = &sc->sc_queue[i];
   6892 					mask |= 1 << wmq->wmq_intr_idx;
   6893 				}
   6894 				mask |= 1 << sc->sc_link_intr_idx;
   6895 			}
   6896 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6897 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6898 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6899 
   6900 			/* For other interrupts */
   6901 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6902 			break;
   6903 		}
   6904 	} else {
   6905 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6906 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6907 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6908 	}
   6909 
   6910 	/* Set up the inter-packet gap. */
   6911 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6912 
   6913 	if (sc->sc_type >= WM_T_82543) {
   6914 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6915 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6916 			wm_itrs_writereg(sc, wmq);
   6917 		}
   6918 		/*
    6919 		 * Link interrupts occur much less frequently than TX and
    6920 		 * RX interrupts, so we don't tune the
    6921 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6922 		 * if_igb does.
   6923 		 */
   6924 	}
   6925 
   6926 	/* Set the VLAN EtherType. */
   6927 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6928 
   6929 	/*
   6930 	 * Set up the transmit control register; we start out with
   6931 	 * a collision distance suitable for FDX, but update it when
   6932 	 * we resolve the media type.
   6933 	 */
   6934 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6935 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6936 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6937 	if (sc->sc_type >= WM_T_82571)
   6938 		sc->sc_tctl |= TCTL_MULR;
   6939 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6940 
   6941 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6942 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   6943 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6944 	}
   6945 
   6946 	if (sc->sc_type == WM_T_80003) {
   6947 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6948 		reg &= ~TCTL_EXT_GCEX_MASK;
   6949 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6950 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6951 	}
   6952 
   6953 	/* Set the media. */
   6954 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6955 		goto out;
   6956 
   6957 	/* Configure for OS presence */
   6958 	wm_init_manageability(sc);
   6959 
   6960 	/*
   6961 	 * Set up the receive control register; we actually program the
   6962 	 * register when we set the receive filter. Use multicast address
   6963 	 * offset type 0.
   6964 	 *
   6965 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6966 	 * don't enable that feature.
   6967 	 */
   6968 	sc->sc_mchash_type = 0;
   6969 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6970 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6971 
    6972 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6973 	if (sc->sc_type == WM_T_82574)
   6974 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6975 
   6976 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6977 		sc->sc_rctl |= RCTL_SECRC;
   6978 
   6979 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6980 	    && (ifp->if_mtu > ETHERMTU)) {
   6981 		sc->sc_rctl |= RCTL_LPE;
   6982 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6983 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6984 	}
   6985 
   6986 	if (MCLBYTES == 2048)
   6987 		sc->sc_rctl |= RCTL_2k;
   6988 	else {
   6989 		if (sc->sc_type >= WM_T_82543) {
   6990 			switch (MCLBYTES) {
   6991 			case 4096:
   6992 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6993 				break;
   6994 			case 8192:
   6995 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6996 				break;
   6997 			case 16384:
   6998 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6999 				break;
   7000 			default:
   7001 				panic("wm_init: MCLBYTES %d unsupported",
   7002 				    MCLBYTES);
   7003 				break;
   7004 			}
   7005 		} else
   7006 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7007 	}
   7008 
   7009 	/* Enable ECC */
   7010 	switch (sc->sc_type) {
   7011 	case WM_T_82571:
   7012 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7013 		reg |= PBA_ECC_CORR_EN;
   7014 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7015 		break;
   7016 	case WM_T_PCH_LPT:
   7017 	case WM_T_PCH_SPT:
   7018 	case WM_T_PCH_CNP:
   7019 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7020 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7021 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7022 
   7023 		sc->sc_ctrl |= CTRL_MEHE;
   7024 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7025 		break;
   7026 	default:
   7027 		break;
   7028 	}
   7029 
   7030 	/*
   7031 	 * Set the receive filter.
   7032 	 *
   7033 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7034 	 * the setting of RCTL.EN in wm_set_filter()
   7035 	 */
   7036 	wm_set_filter(sc);
   7037 
    7038 	/* On the 82575 and later, set RDT only once RX is enabled. */
   7039 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7040 		int qidx;
   7041 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7042 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7043 			for (i = 0; i < WM_NRXDESC; i++) {
   7044 				mutex_enter(rxq->rxq_lock);
   7045 				wm_init_rxdesc(rxq, i);
   7046 				mutex_exit(rxq->rxq_lock);
   7047 
   7048 			}
   7050 	}
   7051 
   7052 	wm_unset_stopping_flags(sc);
   7053 
   7054 	/* Start the one second link check clock. */
   7055 	callout_schedule(&sc->sc_tick_ch, hz);
   7056 
   7057 	/*
   7058 	 * ...all done! (IFNET_LOCKED asserted above.)
   7059 	 */
   7060 	ifp->if_flags |= IFF_RUNNING;
   7061 
   7062  out:
   7063 	/* Save last flags for the callback */
   7064 	sc->sc_if_flags = ifp->if_flags;
   7065 	sc->sc_ec_capenable = ec->ec_capenable;
   7066 	if (error)
   7067 		log(LOG_ERR, "%s: interface not running\n",
   7068 		    device_xname(sc->sc_dev));
   7069 	return error;
   7070 }
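
/*
 * Rough order of operations in wm_init_locked() above (descriptive
 * summary only): stop and reset -> hardware workaround bits -> PHY
 * reset -> (E)ITR defaults -> TX/RX queue init -> VLAN table and flow
 * control -> RXCSUM offload -> MSI-X IVAR/RSS setup -> interrupt mask
 * enable -> TCTL/RCTL setup -> receive filter -> RX descriptor/RDT
 * init -> IFF_RUNNING.
 */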
   7071 
   7072 /*
   7073  * wm_stop:		[ifnet interface function]
   7074  *
   7075  *	Stop transmission on the interface.
   7076  */
   7077 static void
   7078 wm_stop(struct ifnet *ifp, int disable)
   7079 {
   7080 	struct wm_softc *sc = ifp->if_softc;
   7081 
   7082 	ASSERT_SLEEPABLE();
   7083 
   7084 	WM_CORE_LOCK(sc);
   7085 	wm_stop_locked(ifp, disable ? true : false, true);
   7086 	WM_CORE_UNLOCK(sc);
   7087 
   7088 	/*
   7089 	 * After wm_set_stopping_flags(), it is guaranteed that
   7090 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7091 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
    7092 	 * because it can sleep,
    7093 	 * so we call workqueue_wait() here instead.
   7094 	 */
   7095 	for (int i = 0; i < sc->sc_nqueues; i++)
   7096 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7097 }
   7098 
   7099 static void
   7100 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7101 {
   7102 	struct wm_softc *sc = ifp->if_softc;
   7103 	struct wm_txsoft *txs;
   7104 	int i, qidx;
   7105 
   7106 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7107 		device_xname(sc->sc_dev), __func__));
   7108 	KASSERT(WM_CORE_LOCKED(sc));
   7109 
   7110 	wm_set_stopping_flags(sc);
   7111 
   7112 	if (sc->sc_flags & WM_F_HAS_MII) {
   7113 		/* Down the MII. */
   7114 		mii_down(&sc->sc_mii);
   7115 	} else {
   7116 #if 0
   7117 		/* Should we clear PHY's status properly? */
   7118 		wm_reset(sc);
   7119 #endif
   7120 	}
   7121 
   7122 	/* Stop the transmit and receive processes. */
   7123 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7124 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7125 	sc->sc_rctl &= ~RCTL_EN;
   7126 
   7127 	/*
   7128 	 * Clear the interrupt mask to ensure the device cannot assert its
   7129 	 * interrupt line.
   7130 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7131 	 * service any currently pending or shared interrupt.
   7132 	 */
   7133 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7134 	sc->sc_icr = 0;
   7135 	if (wm_is_using_msix(sc)) {
   7136 		if (sc->sc_type != WM_T_82574) {
   7137 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7138 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7139 		} else
   7140 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7141 	}
   7142 
   7143 	/*
   7144 	 * Stop callouts after interrupts are disabled; if we have
   7145 	 * to wait for them, we will be releasing the CORE_LOCK
   7146 	 * briefly, which will unblock interrupts on the current CPU.
   7147 	 */
   7148 
   7149 	/* Stop the one second clock. */
   7150 	if (wait)
   7151 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7152 	else
   7153 		callout_stop(&sc->sc_tick_ch);
   7154 
   7155 	/* Stop the 82547 Tx FIFO stall check timer. */
   7156 	if (sc->sc_type == WM_T_82547) {
   7157 		if (wait)
   7158 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7159 		else
   7160 			callout_stop(&sc->sc_txfifo_ch);
   7161 	}
   7162 
   7163 	/* Release any queued transmit buffers. */
   7164 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7165 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7166 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7167 		struct mbuf *m;
   7168 
   7169 		mutex_enter(txq->txq_lock);
   7170 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7171 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7172 			txs = &txq->txq_soft[i];
   7173 			if (txs->txs_mbuf != NULL) {
   7174 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   7175 				m_freem(txs->txs_mbuf);
   7176 				txs->txs_mbuf = NULL;
   7177 			}
   7178 		}
   7179 		/* Drain txq_interq */
   7180 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7181 			m_freem(m);
   7182 		mutex_exit(txq->txq_lock);
   7183 	}
   7184 
   7185 	/* Mark the interface as down and cancel the watchdog timer. */
   7186 	ifp->if_flags &= ~IFF_RUNNING;
   7187 
   7188 	if (disable) {
   7189 		for (i = 0; i < sc->sc_nqueues; i++) {
   7190 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7191 			mutex_enter(rxq->rxq_lock);
   7192 			wm_rxdrain(rxq);
   7193 			mutex_exit(rxq->rxq_lock);
   7194 		}
   7195 	}
   7196 
   7197 #if 0 /* notyet */
   7198 	if (sc->sc_type >= WM_T_82544)
   7199 		CSR_WRITE(sc, WMREG_WUC, 0);
   7200 #endif
   7201 }
   7202 
   7203 static void
   7204 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7205 {
   7206 	struct mbuf *m;
   7207 	int i;
   7208 
   7209 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7210 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7211 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7212 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7213 		    m->m_data, m->m_len, m->m_flags);
   7214 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7215 	    i, i == 1 ? "" : "s");
   7216 }
   7217 
   7218 /*
   7219  * wm_82547_txfifo_stall:
   7220  *
   7221  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7222  *	reset the FIFO pointers, and restart packet transmission.
   7223  */
   7224 static void
   7225 wm_82547_txfifo_stall(void *arg)
   7226 {
   7227 	struct wm_softc *sc = arg;
   7228 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7229 
   7230 	mutex_enter(txq->txq_lock);
   7231 
   7232 	if (txq->txq_stopping)
   7233 		goto out;
   7234 
   7235 	if (txq->txq_fifo_stall) {
   7236 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7237 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7238 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7239 			/*
   7240 			 * Packets have drained.  Stop transmitter, reset
   7241 			 * FIFO pointers, restart transmitter, and kick
   7242 			 * the packet queue.
   7243 			 */
   7244 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7245 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7246 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7247 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7248 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7249 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7250 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7251 			CSR_WRITE_FLUSH(sc);
   7252 
   7253 			txq->txq_fifo_head = 0;
   7254 			txq->txq_fifo_stall = 0;
   7255 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7256 		} else {
   7257 			/*
   7258 			 * Still waiting for packets to drain; try again in
   7259 			 * another tick.
   7260 			 */
   7261 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7262 		}
   7263 	}
   7264 
   7265 out:
   7266 	mutex_exit(txq->txq_lock);
   7267 }
   7268 
   7269 /*
   7270  * wm_82547_txfifo_bugchk:
   7271  *
   7272  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7273  *	prevent enqueueing a packet that would wrap around the end
    7274  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   7275  *
   7276  *	We do this by checking the amount of space before the end
   7277  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7278  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7279  *	the internal FIFO pointers to the beginning, and restart
   7280  *	transmission on the interface.
   7281  */
   7282 #define	WM_FIFO_HDR		0x10
   7283 #define	WM_82547_PAD_LEN	0x3e0
   7284 static int
   7285 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7286 {
   7287 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7288 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7289 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7290 
   7291 	/* Just return if already stalled. */
   7292 	if (txq->txq_fifo_stall)
   7293 		return 1;
   7294 
   7295 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7296 		/* Stall only occurs in half-duplex mode. */
   7297 		goto send_packet;
   7298 	}
   7299 
   7300 	if (len >= WM_82547_PAD_LEN + space) {
   7301 		txq->txq_fifo_stall = 1;
   7302 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7303 		return 1;
   7304 	}
   7305 
   7306  send_packet:
   7307 	txq->txq_fifo_head += len;
   7308 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7309 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7310 
   7311 	return 0;
   7312 }
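
/*
 * Worked example for wm_82547_txfifo_bugchk() (a sketch; the numbers
 * are hypothetical): in half-duplex mode with txq_fifo_size = 0x2000
 * and txq_fifo_head = 0x1e00, a 256-byte packet gives
 * len = roundup(256 + 0x10, 0x10) = 0x110 and space = 0x200; since
 * 0x110 < WM_82547_PAD_LEN + 0x200, the packet is sent and
 * txq_fifo_head advances to 0x1f10 with no stall.
 */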
   7313 
   7314 static int
   7315 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7316 {
   7317 	int error;
   7318 
   7319 	/*
   7320 	 * Allocate the control data structures, and create and load the
   7321 	 * DMA map for it.
   7322 	 *
   7323 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7324 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7325 	 * both sets within the same 4G segment.
   7326 	 */
   7327 	if (sc->sc_type < WM_T_82544)
   7328 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7329 	else
   7330 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7331 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7332 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7333 	else
   7334 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7335 
   7336 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7337 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7338 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7339 		aprint_error_dev(sc->sc_dev,
   7340 		    "unable to allocate TX control data, error = %d\n",
   7341 		    error);
   7342 		goto fail_0;
   7343 	}
   7344 
   7345 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7346 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7347 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7348 		aprint_error_dev(sc->sc_dev,
   7349 		    "unable to map TX control data, error = %d\n", error);
   7350 		goto fail_1;
   7351 	}
   7352 
   7353 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7354 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7355 		aprint_error_dev(sc->sc_dev,
   7356 		    "unable to create TX control data DMA map, error = %d\n",
   7357 		    error);
   7358 		goto fail_2;
   7359 	}
   7360 
   7361 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7362 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7363 		aprint_error_dev(sc->sc_dev,
   7364 		    "unable to load TX control data DMA map, error = %d\n",
   7365 		    error);
   7366 		goto fail_3;
   7367 	}
   7368 
   7369 	return 0;
   7370 
   7371  fail_3:
   7372 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7373  fail_2:
   7374 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7375 	    WM_TXDESCS_SIZE(txq));
   7376  fail_1:
   7377 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7378  fail_0:
   7379 	return error;
   7380 }
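
/*
 * Note on the bus_dmamem_alloc() boundary argument above (descriptive
 * only): passing 0x100000000ULL asks bus_dma(9) for memory that does
 * not cross a 4GB boundary, which is how the "same 4G segment"
 * requirement in the NOTE is enforced for the descriptor rings.
 */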
   7381 
   7382 static void
   7383 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7384 {
   7385 
   7386 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7387 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7388 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7389 	    WM_TXDESCS_SIZE(txq));
   7390 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7391 }
   7392 
   7393 static int
   7394 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7395 {
   7396 	int error;
   7397 	size_t rxq_descs_size;
   7398 
   7399 	/*
   7400 	 * Allocate the control data structures, and create and load the
   7401 	 * DMA map for it.
   7402 	 *
   7403 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7404 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7405 	 * both sets within the same 4G segment.
   7406 	 */
   7407 	rxq->rxq_ndesc = WM_NRXDESC;
   7408 	if (sc->sc_type == WM_T_82574)
   7409 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7410 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7411 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7412 	else
   7413 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7414 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7415 
   7416 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7417 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7418 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7419 		aprint_error_dev(sc->sc_dev,
   7420 		    "unable to allocate RX control data, error = %d\n",
   7421 		    error);
   7422 		goto fail_0;
   7423 	}
   7424 
   7425 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7426 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7427 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7428 		aprint_error_dev(sc->sc_dev,
   7429 		    "unable to map RX control data, error = %d\n", error);
   7430 		goto fail_1;
   7431 	}
   7432 
   7433 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7434 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7435 		aprint_error_dev(sc->sc_dev,
   7436 		    "unable to create RX control data DMA map, error = %d\n",
   7437 		    error);
   7438 		goto fail_2;
   7439 	}
   7440 
   7441 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7442 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7443 		aprint_error_dev(sc->sc_dev,
   7444 		    "unable to load RX control data DMA map, error = %d\n",
   7445 		    error);
   7446 		goto fail_3;
   7447 	}
   7448 
   7449 	return 0;
   7450 
   7451  fail_3:
   7452 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7453  fail_2:
   7454 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7455 	    rxq_descs_size);
   7456  fail_1:
   7457 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7458  fail_0:
   7459 	return error;
   7460 }
   7461 
   7462 static void
   7463 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7464 {
   7465 
   7466 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7467 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7468 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7469 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7470 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7471 }
   7472 
   7473 
   7474 static int
   7475 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7476 {
   7477 	int i, error;
   7478 
   7479 	/* Create the transmit buffer DMA maps. */
   7480 	WM_TXQUEUELEN(txq) =
   7481 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7482 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7483 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7484 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7485 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7486 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7487 			aprint_error_dev(sc->sc_dev,
   7488 			    "unable to create Tx DMA map %d, error = %d\n",
   7489 			    i, error);
   7490 			goto fail;
   7491 		}
   7492 	}
   7493 
   7494 	return 0;
   7495 
   7496  fail:
   7497 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7498 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7499 			bus_dmamap_destroy(sc->sc_dmat,
   7500 			    txq->txq_soft[i].txs_dmamap);
   7501 	}
   7502 	return error;
   7503 }
   7504 
   7505 static void
   7506 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7507 {
   7508 	int i;
   7509 
   7510 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7511 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7512 			bus_dmamap_destroy(sc->sc_dmat,
   7513 			    txq->txq_soft[i].txs_dmamap);
   7514 	}
   7515 }
   7516 
   7517 static int
   7518 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7519 {
   7520 	int i, error;
   7521 
   7522 	/* Create the receive buffer DMA maps. */
   7523 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7524 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7525 			    MCLBYTES, 0, 0,
   7526 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7527 			aprint_error_dev(sc->sc_dev,
   7528 			    "unable to create Rx DMA map %d error = %d\n",
   7529 			    i, error);
   7530 			goto fail;
   7531 		}
   7532 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7533 	}
   7534 
   7535 	return 0;
   7536 
   7537  fail:
   7538 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7539 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7540 			bus_dmamap_destroy(sc->sc_dmat,
   7541 			    rxq->rxq_soft[i].rxs_dmamap);
   7542 	}
   7543 	return error;
   7544 }
   7545 
   7546 static void
   7547 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7548 {
   7549 	int i;
   7550 
   7551 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7552 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7553 			bus_dmamap_destroy(sc->sc_dmat,
   7554 			    rxq->rxq_soft[i].rxs_dmamap);
   7555 	}
   7556 }
   7557 
   7558 /*
    7559  * wm_alloc_txrx_queues:
    7560  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   7561  */
   7562 static int
   7563 wm_alloc_txrx_queues(struct wm_softc *sc)
   7564 {
   7565 	int i, error, tx_done, rx_done;
   7566 
   7567 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7568 	    KM_SLEEP);
   7569 	if (sc->sc_queue == NULL) {
   7570 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   7571 		error = ENOMEM;
   7572 		goto fail_0;
   7573 	}
   7574 
   7575 	/* For transmission */
   7576 	error = 0;
   7577 	tx_done = 0;
   7578 	for (i = 0; i < sc->sc_nqueues; i++) {
   7579 #ifdef WM_EVENT_COUNTERS
   7580 		int j;
   7581 		const char *xname;
   7582 #endif
   7583 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7584 		txq->txq_sc = sc;
   7585 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7586 
   7587 		error = wm_alloc_tx_descs(sc, txq);
   7588 		if (error)
   7589 			break;
   7590 		error = wm_alloc_tx_buffer(sc, txq);
   7591 		if (error) {
   7592 			wm_free_tx_descs(sc, txq);
   7593 			break;
   7594 		}
   7595 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7596 		if (txq->txq_interq == NULL) {
   7597 			wm_free_tx_descs(sc, txq);
   7598 			wm_free_tx_buffer(sc, txq);
   7599 			error = ENOMEM;
   7600 			break;
   7601 		}
   7602 
   7603 #ifdef WM_EVENT_COUNTERS
   7604 		xname = device_xname(sc->sc_dev);
   7605 
   7606 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7607 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7608 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7609 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7610 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7611 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7612 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7613 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7614 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7615 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7616 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7617 
   7618 		for (j = 0; j < WM_NTXSEGS; j++) {
   7619 			snprintf(txq->txq_txseg_evcnt_names[j],
   7620 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7621 			    "txq%02dtxseg%d", i, j);
   7622 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7623 			    EVCNT_TYPE_MISC,
   7624 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7625 		}
   7626 
   7627 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7628 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7629 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7630 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7631 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7632 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7633 #endif /* WM_EVENT_COUNTERS */
   7634 
   7635 		tx_done++;
   7636 	}
   7637 	if (error)
   7638 		goto fail_1;
   7639 
   7640 	/* For receive */
   7641 	error = 0;
   7642 	rx_done = 0;
   7643 	for (i = 0; i < sc->sc_nqueues; i++) {
   7644 #ifdef WM_EVENT_COUNTERS
   7645 		const char *xname;
   7646 #endif
   7647 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7648 		rxq->rxq_sc = sc;
   7649 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7650 
   7651 		error = wm_alloc_rx_descs(sc, rxq);
   7652 		if (error)
   7653 			break;
   7654 
   7655 		error = wm_alloc_rx_buffer(sc, rxq);
   7656 		if (error) {
   7657 			wm_free_rx_descs(sc, rxq);
   7658 			break;
   7659 		}
   7660 
   7661 #ifdef WM_EVENT_COUNTERS
   7662 		xname = device_xname(sc->sc_dev);
   7663 
   7664 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7665 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7666 
   7667 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7668 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7669 #endif /* WM_EVENT_COUNTERS */
   7670 
   7671 		rx_done++;
   7672 	}
   7673 	if (error)
   7674 		goto fail_2;
   7675 
   7676 	return 0;
   7677 
   7678  fail_2:
   7679 	for (i = 0; i < rx_done; i++) {
   7680 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7681 		wm_free_rx_buffer(sc, rxq);
   7682 		wm_free_rx_descs(sc, rxq);
   7683 		if (rxq->rxq_lock)
   7684 			mutex_obj_free(rxq->rxq_lock);
   7685 	}
   7686  fail_1:
   7687 	for (i = 0; i < tx_done; i++) {
   7688 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7689 		pcq_destroy(txq->txq_interq);
   7690 		wm_free_tx_buffer(sc, txq);
   7691 		wm_free_tx_descs(sc, txq);
   7692 		if (txq->txq_lock)
   7693 			mutex_obj_free(txq->txq_lock);
   7694 	}
   7695 
   7696 	kmem_free(sc->sc_queue,
   7697 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7698  fail_0:
   7699 	return error;
   7700 }
   7701 
   7702 /*
    7703  * wm_free_txrx_queues:
   7704  *	Free {tx,rx}descs and {tx,rx} buffers
   7705  */
   7706 static void
   7707 wm_free_txrx_queues(struct wm_softc *sc)
   7708 {
   7709 	int i;
   7710 
   7711 	for (i = 0; i < sc->sc_nqueues; i++) {
   7712 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7713 
   7714 #ifdef WM_EVENT_COUNTERS
   7715 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7716 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7717 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7718 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7719 #endif /* WM_EVENT_COUNTERS */
   7720 
   7721 		wm_free_rx_buffer(sc, rxq);
   7722 		wm_free_rx_descs(sc, rxq);
   7723 		if (rxq->rxq_lock)
   7724 			mutex_obj_free(rxq->rxq_lock);
   7725 	}
   7726 
   7727 	for (i = 0; i < sc->sc_nqueues; i++) {
   7728 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7729 		struct mbuf *m;
   7730 #ifdef WM_EVENT_COUNTERS
   7731 		int j;
   7732 
   7733 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7734 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7735 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7736 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7737 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7738 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7739 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7740 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7741 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7742 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7743 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7744 
   7745 		for (j = 0; j < WM_NTXSEGS; j++)
   7746 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7747 
   7748 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7749 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7750 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7751 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7752 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7753 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7754 #endif /* WM_EVENT_COUNTERS */
   7755 
   7756 		/* Drain txq_interq */
   7757 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7758 			m_freem(m);
   7759 		pcq_destroy(txq->txq_interq);
   7760 
   7761 		wm_free_tx_buffer(sc, txq);
   7762 		wm_free_tx_descs(sc, txq);
   7763 		if (txq->txq_lock)
   7764 			mutex_obj_free(txq->txq_lock);
   7765 	}
   7766 
   7767 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7768 }
   7769 
   7770 static void
   7771 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7772 {
   7773 
   7774 	KASSERT(mutex_owned(txq->txq_lock));
   7775 
   7776 	/* Initialize the transmit descriptor ring. */
   7777 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7778 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7779 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7780 	txq->txq_free = WM_NTXDESC(txq);
   7781 	txq->txq_next = 0;
   7782 }
   7783 
   7784 static void
   7785 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7786     struct wm_txqueue *txq)
   7787 {
   7788 
   7789 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7790 		device_xname(sc->sc_dev), __func__));
   7791 	KASSERT(mutex_owned(txq->txq_lock));
   7792 
   7793 	if (sc->sc_type < WM_T_82543) {
   7794 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7795 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7796 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7797 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7798 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7799 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7800 	} else {
   7801 		int qid = wmq->wmq_id;
   7802 
   7803 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7804 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7805 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7806 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7807 
   7808 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7809 			/*
   7810 			 * Don't write TDT before TCTL.EN is set.
    7811 			 * See the hardware documentation.
   7812 			 */
   7813 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7814 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7815 			    | TXDCTL_WTHRESH(0));
   7816 		else {
   7817 			/* XXX should update with AIM? */
   7818 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7819 			if (sc->sc_type >= WM_T_82540) {
    7820 				/* Should be the same value as TIDV */
   7821 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7822 			}
   7823 
   7824 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7825 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7826 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7827 		}
   7828 	}
   7829 }
   7830 
   7831 static void
   7832 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7833 {
   7834 	int i;
   7835 
   7836 	KASSERT(mutex_owned(txq->txq_lock));
   7837 
   7838 	/* Initialize the transmit job descriptors. */
   7839 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7840 		txq->txq_soft[i].txs_mbuf = NULL;
   7841 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7842 	txq->txq_snext = 0;
   7843 	txq->txq_sdirty = 0;
   7844 }
   7845 
   7846 static void
   7847 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7848     struct wm_txqueue *txq)
   7849 {
   7850 
   7851 	KASSERT(mutex_owned(txq->txq_lock));
   7852 
   7853 	/*
   7854 	 * Set up some register offsets that are different between
   7855 	 * the i82542 and the i82543 and later chips.
   7856 	 */
   7857 	if (sc->sc_type < WM_T_82543)
   7858 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7859 	else
   7860 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7861 
   7862 	wm_init_tx_descs(sc, txq);
   7863 	wm_init_tx_regs(sc, wmq, txq);
   7864 	wm_init_tx_buffer(sc, txq);
   7865 
   7866 	/* Clear other than WM_TXQ_LINKDOWN_DISCARD */
   7867 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7868 
   7869 	txq->txq_sending = false;
   7870 }
   7871 
   7872 static void
   7873 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7874     struct wm_rxqueue *rxq)
   7875 {
   7876 
   7877 	KASSERT(mutex_owned(rxq->rxq_lock));
   7878 
   7879 	/*
   7880 	 * Initialize the receive descriptor and receive job
   7881 	 * descriptor rings.
   7882 	 */
   7883 	if (sc->sc_type < WM_T_82543) {
   7884 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7885 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7886 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7887 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7888 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7889 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7890 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7891 
   7892 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7893 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7894 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7895 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7896 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7897 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7898 	} else {
   7899 		int qid = wmq->wmq_id;
   7900 
   7901 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7902 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7903 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7904 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7905 
   7906 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7907 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7908 				panic("%s: MCLBYTES %d unsupported for 82575 "
   7909 				    "or higher\n", __func__, MCLBYTES);
   7910 
   7911 			/*
   7912 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   7913 			 * only.
   7914 			 */
   7915 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7916 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7917 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7918 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7919 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7920 			    | RXDCTL_WTHRESH(1));
   7921 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7922 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7923 		} else {
   7924 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7925 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7926 			/* XXX should update with AIM? */
   7927 			CSR_WRITE(sc, WMREG_RDTR,
   7928 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7929 			/* MUST be same */
   7930 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7931 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7932 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7933 		}
   7934 	}
   7935 }
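
         /*
          * For reference, with the standard 2048-byte MCLBYTES the SRRCTL
          * write above programs a buffer size field of 2048 >> 10 = 2
          * (the field is in 1 KB units, assuming the usual value of 10 for
          * SRRCTL_BSIZEPKT_SHIFT), and the panic check passes because 2048
          * is a multiple of 1 << SRRCTL_BSIZEPKT_SHIFT.
          */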
   7936 
   7937 static int
   7938 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7939 {
   7940 	struct wm_rxsoft *rxs;
   7941 	int error, i;
   7942 
   7943 	KASSERT(mutex_owned(rxq->rxq_lock));
   7944 
   7945 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7946 		rxs = &rxq->rxq_soft[i];
   7947 		if (rxs->rxs_mbuf == NULL) {
   7948 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7949 				log(LOG_ERR, "%s: unable to allocate or map "
   7950 				    "rx buffer %d, error = %d\n",
   7951 				    device_xname(sc->sc_dev), i, error);
   7952 				/*
   7953 				 * XXX Should attempt to run with fewer receive
   7954 				 * XXX buffers instead of just failing.
   7955 				 */
   7956 				wm_rxdrain(rxq);
   7957 				return ENOMEM;
   7958 			}
   7959 		} else {
   7960 			/*
   7961 			 * For 82575 and 82576, the RX descriptors must be
   7962 			 * initialized after the setting of RCTL.EN in
   7963 			 * wm_set_filter()
   7964 			 */
   7965 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7966 				wm_init_rxdesc(rxq, i);
   7967 		}
   7968 	}
   7969 	rxq->rxq_ptr = 0;
   7970 	rxq->rxq_discard = 0;
   7971 	WM_RXCHAIN_RESET(rxq);
   7972 
   7973 	return 0;
   7974 }
   7975 
   7976 static int
   7977 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7978     struct wm_rxqueue *rxq)
   7979 {
   7980 
   7981 	KASSERT(mutex_owned(rxq->rxq_lock));
   7982 
   7983 	/*
   7984 	 * Set up some register offsets that are different between
   7985 	 * the i82542 and the i82543 and later chips.
   7986 	 */
   7987 	if (sc->sc_type < WM_T_82543)
   7988 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7989 	else
   7990 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7991 
   7992 	wm_init_rx_regs(sc, wmq, rxq);
   7993 	return wm_init_rx_buffer(sc, rxq);
   7994 }
   7995 
   7996 /*
    7997  * wm_init_txrx_queues:
   7998  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7999  */
   8000 static int
   8001 wm_init_txrx_queues(struct wm_softc *sc)
   8002 {
   8003 	int i, error = 0;
   8004 
   8005 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8006 		device_xname(sc->sc_dev), __func__));
   8007 
   8008 	for (i = 0; i < sc->sc_nqueues; i++) {
   8009 		struct wm_queue *wmq = &sc->sc_queue[i];
   8010 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8011 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8012 
    8013 		/*
    8014 		 * TODO
    8015 		 * Currently, we use a constant value instead of AIM.
    8016 		 * Furthermore, the interrupt interval of a multiqueue
    8017 		 * setup using polling mode is less than the default value.
    8018 		 * More tuning and AIM are required.
    8019 		 */
   8020 		if (wm_is_using_multiqueue(sc))
   8021 			wmq->wmq_itr = 50;
   8022 		else
   8023 			wmq->wmq_itr = sc->sc_itr_init;
   8024 		wmq->wmq_set_itr = true;
   8025 
   8026 		mutex_enter(txq->txq_lock);
   8027 		wm_init_tx_queue(sc, wmq, txq);
   8028 		mutex_exit(txq->txq_lock);
   8029 
   8030 		mutex_enter(rxq->rxq_lock);
   8031 		error = wm_init_rx_queue(sc, wmq, rxq);
   8032 		mutex_exit(rxq->rxq_lock);
   8033 		if (error)
   8034 			break;
   8035 	}
   8036 
   8037 	return error;
   8038 }
   8039 
   8040 /*
   8041  * wm_tx_offload:
   8042  *
   8043  *	Set up TCP/IP checksumming parameters for the
   8044  *	specified packet.
   8045  */
   8046 static void
   8047 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8048     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8049 {
   8050 	struct mbuf *m0 = txs->txs_mbuf;
   8051 	struct livengood_tcpip_ctxdesc *t;
   8052 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8053 	uint32_t ipcse;
   8054 	struct ether_header *eh;
   8055 	int offset, iphl;
   8056 	uint8_t fields;
   8057 
   8058 	/*
   8059 	 * XXX It would be nice if the mbuf pkthdr had offset
   8060 	 * fields for the protocol headers.
   8061 	 */
   8062 
   8063 	eh = mtod(m0, struct ether_header *);
   8064 	switch (htons(eh->ether_type)) {
   8065 	case ETHERTYPE_IP:
   8066 	case ETHERTYPE_IPV6:
   8067 		offset = ETHER_HDR_LEN;
   8068 		break;
   8069 
   8070 	case ETHERTYPE_VLAN:
   8071 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8072 		break;
   8073 
   8074 	default:
   8075 		/* Don't support this protocol or encapsulation. */
   8076 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8077 		txq->txq_last_hw_ipcs = 0;
   8078 		txq->txq_last_hw_tucs = 0;
   8079 		*fieldsp = 0;
   8080 		*cmdp = 0;
   8081 		return;
   8082 	}
   8083 
    8084 	if ((m0->m_pkthdr.csum_flags &
    8085 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
    8086 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
    8087 	} else {
    8088 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
         	}
   8089 
   8090 	ipcse = offset + iphl - 1;
   8091 
   8092 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8093 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8094 	seg = 0;
   8095 	fields = 0;
   8096 
   8097 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8098 		int hlen = offset + iphl;
   8099 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8100 
   8101 		if (__predict_false(m0->m_len <
   8102 				    (hlen + sizeof(struct tcphdr)))) {
   8103 			/*
   8104 			 * TCP/IP headers are not in the first mbuf; we need
   8105 			 * to do this the slow and painful way. Let's just
   8106 			 * hope this doesn't happen very often.
   8107 			 */
   8108 			struct tcphdr th;
   8109 
   8110 			WM_Q_EVCNT_INCR(txq, tsopain);
   8111 
   8112 			m_copydata(m0, hlen, sizeof(th), &th);
   8113 			if (v4) {
   8114 				struct ip ip;
   8115 
   8116 				m_copydata(m0, offset, sizeof(ip), &ip);
   8117 				ip.ip_len = 0;
   8118 				m_copyback(m0,
   8119 				    offset + offsetof(struct ip, ip_len),
   8120 				    sizeof(ip.ip_len), &ip.ip_len);
   8121 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8122 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8123 			} else {
   8124 				struct ip6_hdr ip6;
   8125 
   8126 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8127 				ip6.ip6_plen = 0;
   8128 				m_copyback(m0,
   8129 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8130 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8131 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8132 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8133 			}
   8134 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8135 			    sizeof(th.th_sum), &th.th_sum);
   8136 
   8137 			hlen += th.th_off << 2;
   8138 		} else {
   8139 			/*
   8140 			 * TCP/IP headers are in the first mbuf; we can do
   8141 			 * this the easy way.
   8142 			 */
   8143 			struct tcphdr *th;
   8144 
   8145 			if (v4) {
   8146 				struct ip *ip =
   8147 				    (void *)(mtod(m0, char *) + offset);
   8148 				th = (void *)(mtod(m0, char *) + hlen);
   8149 
   8150 				ip->ip_len = 0;
   8151 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8152 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8153 			} else {
   8154 				struct ip6_hdr *ip6 =
   8155 				    (void *)(mtod(m0, char *) + offset);
   8156 				th = (void *)(mtod(m0, char *) + hlen);
   8157 
   8158 				ip6->ip6_plen = 0;
   8159 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8160 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8161 			}
   8162 			hlen += th->th_off << 2;
   8163 		}
   8164 
   8165 		if (v4) {
   8166 			WM_Q_EVCNT_INCR(txq, tso);
   8167 			cmdlen |= WTX_TCPIP_CMD_IP;
   8168 		} else {
   8169 			WM_Q_EVCNT_INCR(txq, tso6);
   8170 			ipcse = 0;
   8171 		}
   8172 		cmd |= WTX_TCPIP_CMD_TSE;
   8173 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8174 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8175 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8176 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8177 	}
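
         	/*
         	 * Worked example (illustrative): for an IPv4 TCP segment with
         	 * no options, hlen = 14 + 20 + 20 = 54; with an MSS of 1460
         	 * the context gets HDRLEN = 54 and MSS = 1460, and the payload
         	 * length merged into cmdlen is m0->m_pkthdr.len - 54.
         	 */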
   8178 
   8179 	/*
   8180 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8181 	 * offload feature, if we load the context descriptor, we
   8182 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8183 	 */
   8184 
   8185 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8186 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8187 	    WTX_TCPIP_IPCSE(ipcse);
   8188 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8189 		WM_Q_EVCNT_INCR(txq, ipsum);
   8190 		fields |= WTX_IXSM;
   8191 	}
   8192 
   8193 	offset += iphl;
   8194 
   8195 	if (m0->m_pkthdr.csum_flags &
   8196 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8197 		WM_Q_EVCNT_INCR(txq, tusum);
   8198 		fields |= WTX_TXSM;
   8199 		tucs = WTX_TCPIP_TUCSS(offset) |
   8200 		    WTX_TCPIP_TUCSO(offset +
   8201 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8202 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8203 	} else if ((m0->m_pkthdr.csum_flags &
   8204 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8205 		WM_Q_EVCNT_INCR(txq, tusum6);
   8206 		fields |= WTX_TXSM;
   8207 		tucs = WTX_TCPIP_TUCSS(offset) |
   8208 		    WTX_TCPIP_TUCSO(offset +
   8209 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8210 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8211 	} else {
   8212 		/* Just initialize it to a valid TCP context. */
   8213 		tucs = WTX_TCPIP_TUCSS(offset) |
   8214 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8215 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8216 	}
   8217 
   8218 	*cmdp = cmd;
   8219 	*fieldsp = fields;
   8220 
    8221 	/*
    8222 	 * We don't have to write a context descriptor for every packet,
    8223 	 * except on the 82574. For the 82574, we must write a context
    8224 	 * descriptor for every packet when we use two descriptor queues.
    8225 	 *
    8226 	 * The 82574L can only remember the *last* context used,
    8227 	 * regardless of which queue it was used for.  We cannot reuse
    8228 	 * contexts on this hardware platform and must generate a new
    8229 	 * context every time.  82574L hardware spec, section 7.2.6,
    8230 	 * second note.
    8231 	 */
   8232 	if (sc->sc_nqueues < 2) {
    8233 		/*
    8234 		 * Setting up a new checksum offload context for every
    8235 		 * frame takes a lot of processing time for hardware.
    8236 		 * This also reduces performance a lot for small sized
    8237 		 * frames, so avoid it if the driver can use a previously
    8238 		 * configured checksum offload context.
    8239 		 * For TSO, in theory we could reuse the same TSO context
    8240 		 * only if the frame is the same type (IP/TCP) and has the
    8241 		 * same MSS. However, checking whether a frame has the same
    8242 		 * IP/TCP structure is hard, so just ignore that and always
    8243 		 * establish a new TSO context.
    8244 		 */
   8245 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8246 		    == 0) {
   8247 			if (txq->txq_last_hw_cmd == cmd &&
   8248 			    txq->txq_last_hw_fields == fields &&
   8249 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8250 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8251 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8252 				return;
   8253 			}
   8254 		}
   8255 
   8256 		txq->txq_last_hw_cmd = cmd;
   8257 		txq->txq_last_hw_fields = fields;
   8258 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8259 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8260 	}
   8261 
   8262 	/* Fill in the context descriptor. */
   8263 	t = (struct livengood_tcpip_ctxdesc *)
   8264 	    &txq->txq_descs[txq->txq_next];
   8265 	t->tcpip_ipcs = htole32(ipcs);
   8266 	t->tcpip_tucs = htole32(tucs);
   8267 	t->tcpip_cmdlen = htole32(cmdlen);
   8268 	t->tcpip_seg = htole32(seg);
   8269 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8270 
   8271 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8272 	txs->txs_ndesc++;
   8273 }
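
         /*
          * Worked example for the checksum context above (plain IPv4/TCP,
          * no VLAN): offset = ETHER_HDR_LEN = 14 and iphl = 20, so
          * IPCSS = 14 (start of the IP header), IPCSO = 14 + 10 = 24
          * (offsetof(struct ip, ip_sum)) and IPCSE = 14 + 20 - 1 = 33
          * (last byte of the IP header); TUCSS = 14 + 20 = 34 and, for
          * TCP, TUCSO = 34 + 16 = 50 (offsetof(struct tcphdr, th_sum)).
          */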
   8274 
   8275 static inline int
   8276 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8277 {
   8278 	struct wm_softc *sc = ifp->if_softc;
   8279 	u_int cpuid = cpu_index(curcpu());
   8280 
    8281 	/*
    8282 	 * Currently, a simple distribution strategy.
    8283 	 * TODO:
    8284 	 * Distribute by flowid (i.e. by the RSS hash value).
    8285 	 */
    8286 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu)
         	    % sc->sc_nqueues;
   8287 }
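
         /*
          * A flowid-based strategy, as the TODO above suggests, might look
          * like the sketch below.  This is purely illustrative: the
          * rss_hash_valid()/rss_hash() helpers are hypothetical and do not
          * exist in this driver or in the mbuf layer.
          *
          *	if (rss_hash_valid(m))
          *		return rss_hash(m) % sc->sc_nqueues;
          *	(otherwise fall back to the CPU-based distribution above)
          */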
   8288 
   8289 static inline bool
   8290 wm_linkdown_discard(struct wm_txqueue *txq)
   8291 {
   8292 
   8293 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8294 		return true;
   8295 
   8296 	return false;
   8297 }
   8298 
   8299 /*
   8300  * wm_start:		[ifnet interface function]
   8301  *
   8302  *	Start packet transmission on the interface.
   8303  */
   8304 static void
   8305 wm_start(struct ifnet *ifp)
   8306 {
   8307 	struct wm_softc *sc = ifp->if_softc;
   8308 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8309 
   8310 #ifdef WM_MPSAFE
   8311 	KASSERT(if_is_mpsafe(ifp));
   8312 #endif
   8313 	/*
   8314 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8315 	 */
   8316 
   8317 	mutex_enter(txq->txq_lock);
   8318 	if (!txq->txq_stopping)
   8319 		wm_start_locked(ifp);
   8320 	mutex_exit(txq->txq_lock);
   8321 }
   8322 
   8323 static void
   8324 wm_start_locked(struct ifnet *ifp)
   8325 {
   8326 	struct wm_softc *sc = ifp->if_softc;
   8327 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8328 
   8329 	wm_send_common_locked(ifp, txq, false);
   8330 }
   8331 
   8332 static int
   8333 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8334 {
   8335 	int qid;
   8336 	struct wm_softc *sc = ifp->if_softc;
   8337 	struct wm_txqueue *txq;
   8338 
   8339 	qid = wm_select_txqueue(ifp, m);
   8340 	txq = &sc->sc_queue[qid].wmq_txq;
   8341 
   8342 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8343 		m_freem(m);
   8344 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8345 		return ENOBUFS;
   8346 	}
   8347 
   8348 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8349 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8350 	if (m->m_flags & M_MCAST)
   8351 		if_statinc_ref(nsr, if_omcasts);
   8352 	IF_STAT_PUTREF(ifp);
   8353 
   8354 	if (mutex_tryenter(txq->txq_lock)) {
   8355 		if (!txq->txq_stopping)
   8356 			wm_transmit_locked(ifp, txq);
   8357 		mutex_exit(txq->txq_lock);
   8358 	}
   8359 
   8360 	return 0;
   8361 }
   8362 
   8363 static void
   8364 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8365 {
   8366 
   8367 	wm_send_common_locked(ifp, txq, true);
   8368 }
   8369 
   8370 static void
   8371 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8372     bool is_transmit)
   8373 {
   8374 	struct wm_softc *sc = ifp->if_softc;
   8375 	struct mbuf *m0;
   8376 	struct wm_txsoft *txs;
   8377 	bus_dmamap_t dmamap;
   8378 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8379 	bus_addr_t curaddr;
   8380 	bus_size_t seglen, curlen;
   8381 	uint32_t cksumcmd;
   8382 	uint8_t cksumfields;
   8383 	bool remap = true;
   8384 
   8385 	KASSERT(mutex_owned(txq->txq_lock));
   8386 
   8387 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8388 		return;
   8389 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8390 		return;
   8391 
   8392 	if (__predict_false(wm_linkdown_discard(txq))) {
   8393 		do {
   8394 			if (is_transmit)
   8395 				m0 = pcq_get(txq->txq_interq);
   8396 			else
   8397 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    8398 			/*
    8399 			 * Count the packet as sent even though it is
    8400 			 * discarded because the PHY link is down.
    8401 			 */
   8402 			if (m0 != NULL) {
   8403 				if_statinc(ifp, if_opackets);
   8404 				m_freem(m0);
   8405 			}
   8406 		} while (m0 != NULL);
   8407 		return;
   8408 	}
   8409 
   8410 	/* Remember the previous number of free descriptors. */
   8411 	ofree = txq->txq_free;
   8412 
   8413 	/*
   8414 	 * Loop through the send queue, setting up transmit descriptors
   8415 	 * until we drain the queue, or use up all available transmit
   8416 	 * descriptors.
   8417 	 */
   8418 	for (;;) {
   8419 		m0 = NULL;
   8420 
   8421 		/* Get a work queue entry. */
   8422 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8423 			wm_txeof(txq, UINT_MAX);
   8424 			if (txq->txq_sfree == 0) {
   8425 				DPRINTF(sc, WM_DEBUG_TX,
   8426 				    ("%s: TX: no free job descriptors\n",
   8427 					device_xname(sc->sc_dev)));
   8428 				WM_Q_EVCNT_INCR(txq, txsstall);
   8429 				break;
   8430 			}
   8431 		}
   8432 
   8433 		/* Grab a packet off the queue. */
   8434 		if (is_transmit)
   8435 			m0 = pcq_get(txq->txq_interq);
   8436 		else
   8437 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8438 		if (m0 == NULL)
   8439 			break;
   8440 
   8441 		DPRINTF(sc, WM_DEBUG_TX,
   8442 		    ("%s: TX: have packet to transmit: %p\n",
   8443 			device_xname(sc->sc_dev), m0));
   8444 
   8445 		txs = &txq->txq_soft[txq->txq_snext];
   8446 		dmamap = txs->txs_dmamap;
   8447 
   8448 		use_tso = (m0->m_pkthdr.csum_flags &
   8449 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8450 
   8451 		/*
   8452 		 * So says the Linux driver:
   8453 		 * The controller does a simple calculation to make sure
   8454 		 * there is enough room in the FIFO before initiating the
   8455 		 * DMA for each buffer. The calc is:
   8456 		 *	4 = ceil(buffer len / MSS)
   8457 		 * To make sure we don't overrun the FIFO, adjust the max
   8458 		 * buffer len if the MSS drops.
   8459 		 */
   8460 		dmamap->dm_maxsegsz =
   8461 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8462 		    ? m0->m_pkthdr.segsz << 2
   8463 		    : WTX_MAX_LEN;
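
         		/*
         		 * E.g. with an MSS of 1448, dm_maxsegsz is clamped to
         		 * 4 * 1448 = 5792 bytes, so ceil(5792 / 1448) = 4 and
         		 * the FIFO estimate described above stays valid.
         		 */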
   8464 
   8465 		/*
   8466 		 * Load the DMA map.  If this fails, the packet either
   8467 		 * didn't fit in the allotted number of segments, or we
   8468 		 * were short on resources.  For the too-many-segments
   8469 		 * case, we simply report an error and drop the packet,
   8470 		 * since we can't sanely copy a jumbo packet to a single
   8471 		 * buffer.
   8472 		 */
   8473 retry:
   8474 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8475 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8476 		if (__predict_false(error)) {
   8477 			if (error == EFBIG) {
   8478 				if (remap == true) {
   8479 					struct mbuf *m;
   8480 
   8481 					remap = false;
   8482 					m = m_defrag(m0, M_NOWAIT);
   8483 					if (m != NULL) {
   8484 						WM_Q_EVCNT_INCR(txq, defrag);
   8485 						m0 = m;
   8486 						goto retry;
   8487 					}
   8488 				}
   8489 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8490 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8491 				    "DMA segments, dropping...\n",
   8492 				    device_xname(sc->sc_dev));
   8493 				wm_dump_mbuf_chain(sc, m0);
   8494 				m_freem(m0);
   8495 				continue;
   8496 			}
   8497 			/* Short on resources, just stop for now. */
   8498 			DPRINTF(sc, WM_DEBUG_TX,
   8499 			    ("%s: TX: dmamap load failed: %d\n",
   8500 				device_xname(sc->sc_dev), error));
   8501 			break;
   8502 		}
   8503 
   8504 		segs_needed = dmamap->dm_nsegs;
   8505 		if (use_tso) {
   8506 			/* For sentinel descriptor; see below. */
   8507 			segs_needed++;
   8508 		}
   8509 
   8510 		/*
   8511 		 * Ensure we have enough descriptors free to describe
   8512 		 * the packet. Note, we always reserve one descriptor
   8513 		 * at the end of the ring due to the semantics of the
   8514 		 * TDT register, plus one more in the event we need
   8515 		 * to load offload context.
   8516 		 */
   8517 		if (segs_needed > txq->txq_free - 2) {
   8518 			/*
   8519 			 * Not enough free descriptors to transmit this
   8520 			 * packet.  We haven't committed anything yet,
   8521 			 * so just unload the DMA map, put the packet
    8522 			 * back on the queue, and punt. Notify the upper
   8523 			 * layer that there are no more slots left.
   8524 			 */
   8525 			DPRINTF(sc, WM_DEBUG_TX,
   8526 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8527 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8528 				segs_needed, txq->txq_free - 1));
   8529 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8530 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8531 			WM_Q_EVCNT_INCR(txq, txdstall);
   8532 			break;
   8533 		}
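
         		/*
         		 * In the check above, a packet that needs 4 descriptors
         		 * (segs_needed == 4) requires txq_free >= 6: 4 data
         		 * descriptors, 1 always kept free because of the TDT
         		 * semantics, and 1 for a possible context descriptor.
         		 */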
   8534 
   8535 		/*
   8536 		 * Check for 82547 Tx FIFO bug. We need to do this
   8537 		 * once we know we can transmit the packet, since we
   8538 		 * do some internal FIFO space accounting here.
   8539 		 */
   8540 		if (sc->sc_type == WM_T_82547 &&
   8541 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8542 			DPRINTF(sc, WM_DEBUG_TX,
   8543 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8544 				device_xname(sc->sc_dev)));
   8545 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8546 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8547 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8548 			break;
   8549 		}
   8550 
   8551 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8552 
   8553 		DPRINTF(sc, WM_DEBUG_TX,
   8554 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8555 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8556 
   8557 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8558 
   8559 		/*
   8560 		 * Store a pointer to the packet so that we can free it
   8561 		 * later.
   8562 		 *
   8563 		 * Initially, we consider the number of descriptors the
   8564 		 * packet uses the number of DMA segments.  This may be
   8565 		 * incremented by 1 if we do checksum offload (a descriptor
   8566 		 * is used to set the checksum context).
   8567 		 */
   8568 		txs->txs_mbuf = m0;
   8569 		txs->txs_firstdesc = txq->txq_next;
   8570 		txs->txs_ndesc = segs_needed;
   8571 
   8572 		/* Set up offload parameters for this packet. */
   8573 		if (m0->m_pkthdr.csum_flags &
   8574 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8575 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8576 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8577 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8578 		} else {
   8579 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8580 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8581 			cksumcmd = 0;
   8582 			cksumfields = 0;
   8583 		}
   8584 
   8585 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8586 
   8587 		/* Sync the DMA map. */
   8588 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8589 		    BUS_DMASYNC_PREWRITE);
   8590 
   8591 		/* Initialize the transmit descriptor. */
   8592 		for (nexttx = txq->txq_next, seg = 0;
   8593 		     seg < dmamap->dm_nsegs; seg++) {
   8594 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8595 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8596 			     seglen != 0;
   8597 			     curaddr += curlen, seglen -= curlen,
   8598 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8599 				curlen = seglen;
   8600 
   8601 				/*
   8602 				 * So says the Linux driver:
   8603 				 * Work around for premature descriptor
   8604 				 * write-backs in TSO mode.  Append a
   8605 				 * 4-byte sentinel descriptor.
   8606 				 */
   8607 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8608 				    curlen > 8)
   8609 					curlen -= 4;
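         				/*
         				 * E.g. a final 1514-byte segment is
         				 * emitted as a 1510-byte descriptor;
         				 * the remaining 4 bytes become the
         				 * sentinel descriptor on the next
         				 * loop iteration.
         				 */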
   8610 
   8611 				wm_set_dma_addr(
   8612 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8613 				txq->txq_descs[nexttx].wtx_cmdlen
   8614 				    = htole32(cksumcmd | curlen);
   8615 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8616 				    = 0;
   8617 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8618 				    = cksumfields;
   8619 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8620 				lasttx = nexttx;
   8621 
   8622 				DPRINTF(sc, WM_DEBUG_TX,
   8623 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8624 					"len %#04zx\n",
   8625 					device_xname(sc->sc_dev), nexttx,
   8626 					(uint64_t)curaddr, curlen));
   8627 			}
   8628 		}
   8629 
   8630 		KASSERT(lasttx != -1);
   8631 
   8632 		/*
   8633 		 * Set up the command byte on the last descriptor of
   8634 		 * the packet. If we're in the interrupt delay window,
   8635 		 * delay the interrupt.
   8636 		 */
   8637 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8638 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8639 
   8640 		/*
   8641 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8642 		 * up the descriptor to encapsulate the packet for us.
   8643 		 *
   8644 		 * This is only valid on the last descriptor of the packet.
   8645 		 */
   8646 		if (vlan_has_tag(m0)) {
   8647 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8648 			    htole32(WTX_CMD_VLE);
   8649 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8650 			    = htole16(vlan_get_tag(m0));
   8651 		}
   8652 
   8653 		txs->txs_lastdesc = lasttx;
   8654 
   8655 		DPRINTF(sc, WM_DEBUG_TX,
   8656 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8657 			device_xname(sc->sc_dev),
   8658 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8659 
   8660 		/* Sync the descriptors we're using. */
   8661 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8662 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8663 
   8664 		/* Give the packet to the chip. */
   8665 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8666 
   8667 		DPRINTF(sc, WM_DEBUG_TX,
   8668 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8669 
   8670 		DPRINTF(sc, WM_DEBUG_TX,
   8671 		    ("%s: TX: finished transmitting packet, job %d\n",
   8672 			device_xname(sc->sc_dev), txq->txq_snext));
   8673 
   8674 		/* Advance the tx pointer. */
   8675 		txq->txq_free -= txs->txs_ndesc;
   8676 		txq->txq_next = nexttx;
   8677 
   8678 		txq->txq_sfree--;
   8679 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8680 
   8681 		/* Pass the packet to any BPF listeners. */
   8682 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8683 	}
   8684 
   8685 	if (m0 != NULL) {
   8686 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8687 		WM_Q_EVCNT_INCR(txq, descdrop);
   8688 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8689 			__func__));
   8690 		m_freem(m0);
   8691 	}
   8692 
   8693 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8694 		/* No more slots; notify upper layer. */
   8695 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8696 	}
   8697 
   8698 	if (txq->txq_free != ofree) {
   8699 		/* Set a watchdog timer in case the chip flakes out. */
   8700 		txq->txq_lastsent = time_uptime;
   8701 		txq->txq_sending = true;
   8702 	}
   8703 }
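
         /*
          * The txq_lastsent/txq_sending pair set above feeds the driver's
          * Tx watchdog, which runs along the lines of the following sketch
          * (the details live in the watchdog code elsewhere in this file):
          *
          *	if (txq->txq_sending &&
          *	    time_uptime - txq->txq_lastsent > timeout)
          *		(the queue is considered hung and the chip is reset)
          */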
   8704 
   8705 /*
   8706  * wm_nq_tx_offload:
   8707  *
   8708  *	Set up TCP/IP checksumming parameters for the
   8709  *	specified packet, for NEWQUEUE devices
   8710  */
   8711 static void
   8712 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8713     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8714 {
   8715 	struct mbuf *m0 = txs->txs_mbuf;
   8716 	uint32_t vl_len, mssidx, cmdc;
   8717 	struct ether_header *eh;
   8718 	int offset, iphl;
   8719 
   8720 	/*
   8721 	 * XXX It would be nice if the mbuf pkthdr had offset
   8722 	 * fields for the protocol headers.
   8723 	 */
   8724 	*cmdlenp = 0;
   8725 	*fieldsp = 0;
   8726 
   8727 	eh = mtod(m0, struct ether_header *);
   8728 	switch (htons(eh->ether_type)) {
   8729 	case ETHERTYPE_IP:
   8730 	case ETHERTYPE_IPV6:
   8731 		offset = ETHER_HDR_LEN;
   8732 		break;
   8733 
   8734 	case ETHERTYPE_VLAN:
   8735 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8736 		break;
   8737 
   8738 	default:
   8739 		/* Don't support this protocol or encapsulation. */
   8740 		*do_csum = false;
   8741 		return;
   8742 	}
   8743 	*do_csum = true;
   8744 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8745 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8746 
   8747 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8748 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8749 
   8750 	if ((m0->m_pkthdr.csum_flags &
   8751 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8752 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8753 	} else {
   8754 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8755 	}
   8756 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8757 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8758 
   8759 	if (vlan_has_tag(m0)) {
   8760 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8761 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8762 		*cmdlenp |= NQTX_CMD_VLE;
   8763 	}
   8764 
   8765 	mssidx = 0;
   8766 
   8767 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8768 		int hlen = offset + iphl;
   8769 		int tcp_hlen;
   8770 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8771 
   8772 		if (__predict_false(m0->m_len <
   8773 				    (hlen + sizeof(struct tcphdr)))) {
   8774 			/*
   8775 			 * TCP/IP headers are not in the first mbuf; we need
   8776 			 * to do this the slow and painful way. Let's just
   8777 			 * hope this doesn't happen very often.
   8778 			 */
   8779 			struct tcphdr th;
   8780 
   8781 			WM_Q_EVCNT_INCR(txq, tsopain);
   8782 
   8783 			m_copydata(m0, hlen, sizeof(th), &th);
   8784 			if (v4) {
   8785 				struct ip ip;
   8786 
   8787 				m_copydata(m0, offset, sizeof(ip), &ip);
   8788 				ip.ip_len = 0;
   8789 				m_copyback(m0,
   8790 				    offset + offsetof(struct ip, ip_len),
   8791 				    sizeof(ip.ip_len), &ip.ip_len);
   8792 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8793 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8794 			} else {
   8795 				struct ip6_hdr ip6;
   8796 
   8797 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8798 				ip6.ip6_plen = 0;
   8799 				m_copyback(m0,
   8800 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8801 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8802 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8803 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8804 			}
   8805 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8806 			    sizeof(th.th_sum), &th.th_sum);
   8807 
   8808 			tcp_hlen = th.th_off << 2;
   8809 		} else {
   8810 			/*
   8811 			 * TCP/IP headers are in the first mbuf; we can do
   8812 			 * this the easy way.
   8813 			 */
   8814 			struct tcphdr *th;
   8815 
   8816 			if (v4) {
   8817 				struct ip *ip =
   8818 				    (void *)(mtod(m0, char *) + offset);
   8819 				th = (void *)(mtod(m0, char *) + hlen);
   8820 
   8821 				ip->ip_len = 0;
   8822 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8823 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8824 			} else {
   8825 				struct ip6_hdr *ip6 =
   8826 				    (void *)(mtod(m0, char *) + offset);
   8827 				th = (void *)(mtod(m0, char *) + hlen);
   8828 
   8829 				ip6->ip6_plen = 0;
   8830 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8831 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8832 			}
   8833 			tcp_hlen = th->th_off << 2;
   8834 		}
   8835 		hlen += tcp_hlen;
   8836 		*cmdlenp |= NQTX_CMD_TSE;
   8837 
   8838 		if (v4) {
   8839 			WM_Q_EVCNT_INCR(txq, tso);
   8840 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8841 		} else {
   8842 			WM_Q_EVCNT_INCR(txq, tso6);
   8843 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8844 		}
   8845 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8846 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8847 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8848 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8849 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8850 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8851 	} else {
   8852 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8853 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8854 	}
   8855 
   8856 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8857 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8858 		cmdc |= NQTXC_CMD_IP4;
   8859 	}
   8860 
   8861 	if (m0->m_pkthdr.csum_flags &
   8862 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8863 		WM_Q_EVCNT_INCR(txq, tusum);
   8864 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8865 			cmdc |= NQTXC_CMD_TCP;
   8866 		else
   8867 			cmdc |= NQTXC_CMD_UDP;
   8868 
   8869 		cmdc |= NQTXC_CMD_IP4;
   8870 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8871 	}
   8872 	if (m0->m_pkthdr.csum_flags &
   8873 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8874 		WM_Q_EVCNT_INCR(txq, tusum6);
   8875 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8876 			cmdc |= NQTXC_CMD_TCP;
   8877 		else
   8878 			cmdc |= NQTXC_CMD_UDP;
   8879 
   8880 		cmdc |= NQTXC_CMD_IP6;
   8881 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8882 	}
   8883 
    8884 	/*
    8885 	 * We don't have to write a context descriptor for every packet to
    8886 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    8887 	 * I210 and I211. It is enough to write it once per Tx queue for
    8888 	 * these controllers.
    8889 	 * Writing a context descriptor for every packet adds overhead,
    8890 	 * but it does not cause problems.
    8891 	 */
   8892 	/* Fill in the context descriptor. */
   8893 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8894 	    htole32(vl_len);
   8895 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8896 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8897 	    htole32(cmdc);
   8898 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8899 	    htole32(mssidx);
   8900 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8901 	DPRINTF(sc, WM_DEBUG_TX,
   8902 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8903 		txq->txq_next, 0, vl_len));
   8904 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8905 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8906 	txs->txs_ndesc++;
   8907 }
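
         /*
          * Worked example for vl_len above (no VLAN tag): with offset = 14
          * and iphl = 20, and assuming the usual advanced context layout
          * (IPLEN in bits 8:0, MACLEN in bits 15:9, VLAN in bits 31:16),
          * vl_len = (14 << 9) | 20 = 0x1c14.
          */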
   8908 
   8909 /*
   8910  * wm_nq_start:		[ifnet interface function]
   8911  *
   8912  *	Start packet transmission on the interface for NEWQUEUE devices
   8913  */
   8914 static void
   8915 wm_nq_start(struct ifnet *ifp)
   8916 {
   8917 	struct wm_softc *sc = ifp->if_softc;
   8918 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8919 
   8920 #ifdef WM_MPSAFE
   8921 	KASSERT(if_is_mpsafe(ifp));
   8922 #endif
   8923 	/*
   8924 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8925 	 */
   8926 
   8927 	mutex_enter(txq->txq_lock);
   8928 	if (!txq->txq_stopping)
   8929 		wm_nq_start_locked(ifp);
   8930 	mutex_exit(txq->txq_lock);
   8931 }
   8932 
   8933 static void
   8934 wm_nq_start_locked(struct ifnet *ifp)
   8935 {
   8936 	struct wm_softc *sc = ifp->if_softc;
   8937 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8938 
   8939 	wm_nq_send_common_locked(ifp, txq, false);
   8940 }
   8941 
   8942 static int
   8943 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8944 {
   8945 	int qid;
   8946 	struct wm_softc *sc = ifp->if_softc;
   8947 	struct wm_txqueue *txq;
   8948 
   8949 	qid = wm_select_txqueue(ifp, m);
   8950 	txq = &sc->sc_queue[qid].wmq_txq;
   8951 
   8952 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8953 		m_freem(m);
   8954 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8955 		return ENOBUFS;
   8956 	}
   8957 
   8958 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8959 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8960 	if (m->m_flags & M_MCAST)
   8961 		if_statinc_ref(nsr, if_omcasts);
   8962 	IF_STAT_PUTREF(ifp);
   8963 
    8964 	/*
    8965 	 * There are two situations in which this mutex_tryenter() can
    8966 	 * fail at run time:
    8967 	 *     (1) contention with the interrupt handler
    8968 	 *         (wm_txrxintr_msix())
    8969 	 *     (2) contention with the deferred if_start softint
    8970 	 *         (wm_handle_queue())
    8971 	 * In both cases, the last packet enqueued to txq->txq_interq is
    8972 	 * dequeued by wm_deferred_start_locked(), so transmission does
    8973 	 * not get stuck either way.
    8974 	 */
   8975 	if (mutex_tryenter(txq->txq_lock)) {
   8976 		if (!txq->txq_stopping)
   8977 			wm_nq_transmit_locked(ifp, txq);
   8978 		mutex_exit(txq->txq_lock);
   8979 	}
   8980 
   8981 	return 0;
   8982 }
   8983 
   8984 static void
   8985 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8986 {
   8987 
   8988 	wm_nq_send_common_locked(ifp, txq, true);
   8989 }
   8990 
   8991 static void
   8992 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8993     bool is_transmit)
   8994 {
   8995 	struct wm_softc *sc = ifp->if_softc;
   8996 	struct mbuf *m0;
   8997 	struct wm_txsoft *txs;
   8998 	bus_dmamap_t dmamap;
   8999 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9000 	bool do_csum, sent;
   9001 	bool remap = true;
   9002 
   9003 	KASSERT(mutex_owned(txq->txq_lock));
   9004 
   9005 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   9006 		return;
   9007 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9008 		return;
   9009 
   9010 	if (__predict_false(wm_linkdown_discard(txq))) {
   9011 		do {
   9012 			if (is_transmit)
   9013 				m0 = pcq_get(txq->txq_interq);
   9014 			else
   9015 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    9016 			/*
    9017 			 * Count the packet as sent even though it is
    9018 			 * discarded because the PHY link is down.
    9019 			 */
   9020 			if (m0 != NULL) {
   9021 				if_statinc(ifp, if_opackets);
   9022 				m_freem(m0);
   9023 			}
   9024 		} while (m0 != NULL);
   9025 		return;
   9026 	}
   9027 
   9028 	sent = false;
   9029 
   9030 	/*
   9031 	 * Loop through the send queue, setting up transmit descriptors
   9032 	 * until we drain the queue, or use up all available transmit
   9033 	 * descriptors.
   9034 	 */
   9035 	for (;;) {
   9036 		m0 = NULL;
   9037 
   9038 		/* Get a work queue entry. */
   9039 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9040 			wm_txeof(txq, UINT_MAX);
   9041 			if (txq->txq_sfree == 0) {
   9042 				DPRINTF(sc, WM_DEBUG_TX,
   9043 				    ("%s: TX: no free job descriptors\n",
   9044 					device_xname(sc->sc_dev)));
   9045 				WM_Q_EVCNT_INCR(txq, txsstall);
   9046 				break;
   9047 			}
   9048 		}
   9049 
   9050 		/* Grab a packet off the queue. */
   9051 		if (is_transmit)
   9052 			m0 = pcq_get(txq->txq_interq);
   9053 		else
   9054 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9055 		if (m0 == NULL)
   9056 			break;
   9057 
   9058 		DPRINTF(sc, WM_DEBUG_TX,
   9059 		    ("%s: TX: have packet to transmit: %p\n",
   9060 		    device_xname(sc->sc_dev), m0));
   9061 
   9062 		txs = &txq->txq_soft[txq->txq_snext];
   9063 		dmamap = txs->txs_dmamap;
   9064 
   9065 		/*
   9066 		 * Load the DMA map.  If this fails, the packet either
   9067 		 * didn't fit in the allotted number of segments, or we
   9068 		 * were short on resources.  For the too-many-segments
   9069 		 * case, we simply report an error and drop the packet,
   9070 		 * since we can't sanely copy a jumbo packet to a single
   9071 		 * buffer.
   9072 		 */
   9073 retry:
   9074 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9075 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9076 		if (__predict_false(error)) {
   9077 			if (error == EFBIG) {
   9078 				if (remap == true) {
   9079 					struct mbuf *m;
   9080 
   9081 					remap = false;
   9082 					m = m_defrag(m0, M_NOWAIT);
   9083 					if (m != NULL) {
   9084 						WM_Q_EVCNT_INCR(txq, defrag);
   9085 						m0 = m;
   9086 						goto retry;
   9087 					}
   9088 				}
   9089 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9090 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9091 				    "DMA segments, dropping...\n",
   9092 				    device_xname(sc->sc_dev));
   9093 				wm_dump_mbuf_chain(sc, m0);
   9094 				m_freem(m0);
   9095 				continue;
   9096 			}
   9097 			/* Short on resources, just stop for now. */
   9098 			DPRINTF(sc, WM_DEBUG_TX,
   9099 			    ("%s: TX: dmamap load failed: %d\n",
   9100 				device_xname(sc->sc_dev), error));
   9101 			break;
   9102 		}
   9103 
   9104 		segs_needed = dmamap->dm_nsegs;
   9105 
   9106 		/*
   9107 		 * Ensure we have enough descriptors free to describe
   9108 		 * the packet. Note, we always reserve one descriptor
   9109 		 * at the end of the ring due to the semantics of the
   9110 		 * TDT register, plus one more in the event we need
   9111 		 * to load offload context.
   9112 		 */
   9113 		if (segs_needed > txq->txq_free - 2) {
   9114 			/*
   9115 			 * Not enough free descriptors to transmit this
   9116 			 * packet.  We haven't committed anything yet,
   9117 			 * so just unload the DMA map, put the packet
    9118 			 * back on the queue, and punt. Notify the upper
   9119 			 * layer that there are no more slots left.
   9120 			 */
   9121 			DPRINTF(sc, WM_DEBUG_TX,
   9122 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9123 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9124 				segs_needed, txq->txq_free - 1));
   9125 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9126 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9127 			WM_Q_EVCNT_INCR(txq, txdstall);
   9128 			break;
   9129 		}
   9130 
   9131 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9132 
   9133 		DPRINTF(sc, WM_DEBUG_TX,
   9134 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9135 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9136 
   9137 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9138 
   9139 		/*
   9140 		 * Store a pointer to the packet so that we can free it
   9141 		 * later.
   9142 		 *
   9143 		 * Initially, we consider the number of descriptors the
   9144 		 * packet uses the number of DMA segments.  This may be
   9145 		 * incremented by 1 if we do checksum offload (a descriptor
   9146 		 * is used to set the checksum context).
   9147 		 */
   9148 		txs->txs_mbuf = m0;
   9149 		txs->txs_firstdesc = txq->txq_next;
   9150 		txs->txs_ndesc = segs_needed;
   9151 
   9152 		/* Set up offload parameters for this packet. */
   9153 		uint32_t cmdlen, fields, dcmdlen;
   9154 		if (m0->m_pkthdr.csum_flags &
   9155 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9156 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9157 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9158 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9159 			    &do_csum);
   9160 		} else {
   9161 			do_csum = false;
   9162 			cmdlen = 0;
   9163 			fields = 0;
   9164 		}
   9165 
   9166 		/* Sync the DMA map. */
   9167 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9168 		    BUS_DMASYNC_PREWRITE);
   9169 
   9170 		/* Initialize the first transmit descriptor. */
   9171 		nexttx = txq->txq_next;
   9172 		if (!do_csum) {
   9173 			/* Set up a legacy descriptor */
   9174 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9175 			    dmamap->dm_segs[0].ds_addr);
   9176 			txq->txq_descs[nexttx].wtx_cmdlen =
   9177 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9178 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9179 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9180 			if (vlan_has_tag(m0)) {
   9181 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9182 				    htole32(WTX_CMD_VLE);
   9183 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9184 				    htole16(vlan_get_tag(m0));
   9185 			} else
   9186 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   9187 
   9188 			dcmdlen = 0;
   9189 		} else {
   9190 			/* Set up an advanced data descriptor */
   9191 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9192 			    htole64(dmamap->dm_segs[0].ds_addr);
   9193 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9194 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9195 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9196 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9197 			    htole32(fields);
   9198 			DPRINTF(sc, WM_DEBUG_TX,
   9199 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9200 				device_xname(sc->sc_dev), nexttx,
   9201 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9202 			DPRINTF(sc, WM_DEBUG_TX,
   9203 			    ("\t 0x%08x%08x\n", fields,
   9204 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9205 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9206 		}
   9207 
   9208 		lasttx = nexttx;
   9209 		nexttx = WM_NEXTTX(txq, nexttx);
   9210 		/*
    9211 		 * Fill in the remaining descriptors. The legacy and advanced
    9212 		 * formats are the same from here on.
   9213 		 */
   9214 		for (seg = 1; seg < dmamap->dm_nsegs;
   9215 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9216 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9217 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9218 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9219 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9220 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9221 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9222 			lasttx = nexttx;
   9223 
   9224 			DPRINTF(sc, WM_DEBUG_TX,
   9225 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9226 				device_xname(sc->sc_dev), nexttx,
   9227 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9228 				dmamap->dm_segs[seg].ds_len));
   9229 		}
   9230 
   9231 		KASSERT(lasttx != -1);
   9232 
   9233 		/*
   9234 		 * Set up the command byte on the last descriptor of
   9235 		 * the packet. If we're in the interrupt delay window,
   9236 		 * delay the interrupt.
   9237 		 */
   9238 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9239 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
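         		/*
         		 * The legacy and advanced EOP/RS command bits coincide, as
         		 * asserted above, so the legacy view of the descriptor can
         		 * be used to set them for either format.
         		 */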
   9240 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9241 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9242 
   9243 		txs->txs_lastdesc = lasttx;
   9244 
   9245 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9246 		    device_xname(sc->sc_dev),
   9247 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9248 
   9249 		/* Sync the descriptors we're using. */
   9250 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9251 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9252 
   9253 		/* Give the packet to the chip. */
   9254 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9255 		sent = true;
   9256 
   9257 		DPRINTF(sc, WM_DEBUG_TX,
   9258 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9259 
   9260 		DPRINTF(sc, WM_DEBUG_TX,
   9261 		    ("%s: TX: finished transmitting packet, job %d\n",
   9262 			device_xname(sc->sc_dev), txq->txq_snext));
   9263 
   9264 		/* Advance the tx pointer. */
   9265 		txq->txq_free -= txs->txs_ndesc;
   9266 		txq->txq_next = nexttx;
   9267 
   9268 		txq->txq_sfree--;
   9269 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9270 
   9271 		/* Pass the packet to any BPF listeners. */
   9272 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9273 	}
   9274 
   9275 	if (m0 != NULL) {
   9276 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9277 		WM_Q_EVCNT_INCR(txq, descdrop);
   9278 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9279 			__func__));
   9280 		m_freem(m0);
   9281 	}
   9282 
   9283 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9284 		/* No more slots; notify upper layer. */
   9285 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9286 	}
   9287 
   9288 	if (sent) {
   9289 		/* Set a watchdog timer in case the chip flakes out. */
   9290 		txq->txq_lastsent = time_uptime;
   9291 		txq->txq_sending = true;
   9292 	}
   9293 }
   9294 
   9295 static void
   9296 wm_deferred_start_locked(struct wm_txqueue *txq)
   9297 {
   9298 	struct wm_softc *sc = txq->txq_sc;
   9299 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9300 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9301 	int qid = wmq->wmq_id;
   9302 
   9303 	KASSERT(mutex_owned(txq->txq_lock));
   9304 	KASSERT(!txq->txq_stopping);
   9305 
   9306 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9307 		/* XXX Needed for ALTQ or single-CPU systems */
   9308 		if (qid == 0)
   9309 			wm_nq_start_locked(ifp);
   9310 		wm_nq_transmit_locked(ifp, txq);
   9311 	} else {
    9312 		/* XXX Needed for ALTQ or single-CPU systems */
   9313 		if (qid == 0)
   9314 			wm_start_locked(ifp);
   9315 		wm_transmit_locked(ifp, txq);
   9316 	}
   9317 }
   9318 
   9319 /* Interrupt */
   9320 
   9321 /*
   9322  * wm_txeof:
   9323  *
   9324  *	Helper; handle transmit interrupts.
   9325  */
   9326 static bool
   9327 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9328 {
   9329 	struct wm_softc *sc = txq->txq_sc;
   9330 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9331 	struct wm_txsoft *txs;
   9332 	int count = 0;
   9333 	int i;
   9334 	uint8_t status;
   9335 	bool more = false;
   9336 
   9337 	KASSERT(mutex_owned(txq->txq_lock));
   9338 
   9339 	if (txq->txq_stopping)
   9340 		return false;
   9341 
   9342 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9343 
   9344 	/*
   9345 	 * Go through the Tx list and free mbufs for those
   9346 	 * frames which have been transmitted.
   9347 	 */
   9348 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9349 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9350 		txs = &txq->txq_soft[i];
   9351 
   9352 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9353 			device_xname(sc->sc_dev), i));
   9354 
   9355 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9356 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9357 
   9358 		status =
   9359 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
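         		/*
         		 * If the descriptor isn't done yet (DD clear), re-sync it
         		 * for the next poll and stop scanning; this job and the
         		 * ones after it are still in flight.
         		 */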
   9360 		if ((status & WTX_ST_DD) == 0) {
   9361 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9362 			    BUS_DMASYNC_PREREAD);
   9363 			break;
   9364 		}
   9365 
   9366 		if (limit-- == 0) {
   9367 			more = true;
   9368 			DPRINTF(sc, WM_DEBUG_TX,
   9369 			    ("%s: TX: loop limited, job %d is not processed\n",
   9370 				device_xname(sc->sc_dev), i));
   9371 			break;
   9372 		}
   9373 
   9374 		count++;
   9375 		DPRINTF(sc, WM_DEBUG_TX,
   9376 		    ("%s: TX: job %d done: descs %d..%d\n",
   9377 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9378 		    txs->txs_lastdesc));
   9379 
   9380 		/*
   9381 		 * XXX We should probably be using the statistics
   9382 		 * XXX registers, but I don't know if they exist
   9383 		 * XXX on chips before the i82544.
   9384 		 */
   9385 
   9386 #ifdef WM_EVENT_COUNTERS
   9387 		if (status & WTX_ST_TU)
   9388 			WM_Q_EVCNT_INCR(txq, underrun);
   9389 #endif /* WM_EVENT_COUNTERS */
   9390 
    9391 		/*
    9392 		 * Documents for the 82574 and newer say the status field has
    9393 		 * neither an EC (Excessive Collision) nor an LC (Late Collision)
    9394 		 * bit; both are reserved. See the "PCIe GbE Controller Open
    9395 		 * Source Software Developer's Manual" and the 82574+ datasheets.
    9396 		 *
    9397 		 * XXX The LC bit has been seen set on an I218 even though the
    9398 		 * media was full duplex, so the bit might have some other
    9399 		 * meaning there (no documentation available).
    9400 		 */
   9401 
   9402 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9403 		    && ((sc->sc_type < WM_T_82574)
   9404 			|| (sc->sc_type == WM_T_80003))) {
   9405 			if_statinc(ifp, if_oerrors);
   9406 			if (status & WTX_ST_LC)
   9407 				log(LOG_WARNING, "%s: late collision\n",
   9408 				    device_xname(sc->sc_dev));
   9409 			else if (status & WTX_ST_EC) {
   9410 				if_statadd(ifp, if_collisions,
   9411 				    TX_COLLISION_THRESHOLD + 1);
   9412 				log(LOG_WARNING, "%s: excessive collisions\n",
   9413 				    device_xname(sc->sc_dev));
   9414 			}
   9415 		} else
   9416 			if_statinc(ifp, if_opackets);
   9417 
   9418 		txq->txq_packets++;
   9419 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9420 
   9421 		txq->txq_free += txs->txs_ndesc;
   9422 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9423 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9424 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9425 		m_freem(txs->txs_mbuf);
   9426 		txs->txs_mbuf = NULL;
   9427 	}
   9428 
   9429 	/* Update the dirty transmit buffer pointer. */
   9430 	txq->txq_sdirty = i;
   9431 	DPRINTF(sc, WM_DEBUG_TX,
   9432 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9433 
   9434 	if (count != 0)
   9435 		rnd_add_uint32(&sc->rnd_source, count);
   9436 
   9437 	/*
   9438 	 * If there are no more pending transmissions, cancel the watchdog
   9439 	 * timer.
   9440 	 */
   9441 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9442 		txq->txq_sending = false;
   9443 
   9444 	return more;
   9445 }
   9446 
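/*
 * Receive descriptor accessors.
 *
 * Three Rx descriptor layouts are in use: the legacy layout, the 82574
 * extended layout and the advanced layout used when WM_F_NEWQUEUE is set.
 * The helpers below hide the differences from the rest of the Rx path.
 */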
   9447 static inline uint32_t
   9448 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9449 {
   9450 	struct wm_softc *sc = rxq->rxq_sc;
   9451 
   9452 	if (sc->sc_type == WM_T_82574)
   9453 		return EXTRXC_STATUS(
   9454 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9455 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9456 		return NQRXC_STATUS(
   9457 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9458 	else
   9459 		return rxq->rxq_descs[idx].wrx_status;
   9460 }
   9461 
   9462 static inline uint32_t
   9463 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9464 {
   9465 	struct wm_softc *sc = rxq->rxq_sc;
   9466 
   9467 	if (sc->sc_type == WM_T_82574)
   9468 		return EXTRXC_ERROR(
   9469 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9470 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9471 		return NQRXC_ERROR(
   9472 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9473 	else
   9474 		return rxq->rxq_descs[idx].wrx_errors;
   9475 }
   9476 
   9477 static inline uint16_t
   9478 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9479 {
   9480 	struct wm_softc *sc = rxq->rxq_sc;
   9481 
   9482 	if (sc->sc_type == WM_T_82574)
   9483 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9484 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9485 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9486 	else
   9487 		return rxq->rxq_descs[idx].wrx_special;
   9488 }
   9489 
   9490 static inline int
   9491 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9492 {
   9493 	struct wm_softc *sc = rxq->rxq_sc;
   9494 
   9495 	if (sc->sc_type == WM_T_82574)
   9496 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9497 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9498 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9499 	else
   9500 		return rxq->rxq_descs[idx].wrx_len;
   9501 }
   9502 
   9503 #ifdef WM_DEBUG
   9504 static inline uint32_t
   9505 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9506 {
   9507 	struct wm_softc *sc = rxq->rxq_sc;
   9508 
   9509 	if (sc->sc_type == WM_T_82574)
   9510 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9511 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9512 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9513 	else
   9514 		return 0;
   9515 }
   9516 
   9517 static inline uint8_t
   9518 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9519 {
   9520 	struct wm_softc *sc = rxq->rxq_sc;
   9521 
   9522 	if (sc->sc_type == WM_T_82574)
   9523 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9524 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9525 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9526 	else
   9527 		return 0;
   9528 }
   9529 #endif /* WM_DEBUG */
   9530 
   9531 static inline bool
   9532 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9533     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9534 {
   9535 
   9536 	if (sc->sc_type == WM_T_82574)
   9537 		return (status & ext_bit) != 0;
   9538 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9539 		return (status & nq_bit) != 0;
   9540 	else
   9541 		return (status & legacy_bit) != 0;
   9542 }
   9543 
   9544 static inline bool
   9545 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9546     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9547 {
   9548 
   9549 	if (sc->sc_type == WM_T_82574)
   9550 		return (error & ext_bit) != 0;
   9551 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9552 		return (error & nq_bit) != 0;
   9553 	else
   9554 		return (error & legacy_bit) != 0;
   9555 }
   9556 
   9557 static inline bool
   9558 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9559 {
   9560 
   9561 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9562 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9563 		return true;
   9564 	else
   9565 		return false;
   9566 }
   9567 
   9568 static inline bool
   9569 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9570 {
   9571 	struct wm_softc *sc = rxq->rxq_sc;
   9572 
   9573 	/* XXX missing error bit for newqueue? */
   9574 	if (wm_rxdesc_is_set_error(sc, errors,
   9575 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9576 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9577 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9578 		NQRXC_ERROR_RXE)) {
   9579 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9580 		    EXTRXC_ERROR_SE, 0))
   9581 			log(LOG_WARNING, "%s: symbol error\n",
   9582 			    device_xname(sc->sc_dev));
   9583 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9584 		    EXTRXC_ERROR_SEQ, 0))
   9585 			log(LOG_WARNING, "%s: receive sequence error\n",
   9586 			    device_xname(sc->sc_dev));
   9587 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9588 		    EXTRXC_ERROR_CE, 0))
   9589 			log(LOG_WARNING, "%s: CRC error\n",
   9590 			    device_xname(sc->sc_dev));
   9591 		return true;
   9592 	}
   9593 
   9594 	return false;
   9595 }
   9596 
   9597 static inline bool
   9598 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9599 {
   9600 	struct wm_softc *sc = rxq->rxq_sc;
   9601 
   9602 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9603 		NQRXC_STATUS_DD)) {
   9604 		/* We have processed all of the receive descriptors. */
   9605 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9606 		return false;
   9607 	}
   9608 
   9609 	return true;
   9610 }
   9611 
   9612 static inline bool
   9613 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9614     uint16_t vlantag, struct mbuf *m)
   9615 {
   9616 
   9617 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9618 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9619 		vlan_set_tag(m, le16toh(vlantag));
   9620 	}
   9621 
   9622 	return true;
   9623 }
   9624 
   9625 static inline void
   9626 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9627     uint32_t errors, struct mbuf *m)
   9628 {
   9629 	struct wm_softc *sc = rxq->rxq_sc;
   9630 
   9631 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9632 		if (wm_rxdesc_is_set_status(sc, status,
   9633 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9634 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9635 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9636 			if (wm_rxdesc_is_set_error(sc, errors,
   9637 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9638 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9639 		}
   9640 		if (wm_rxdesc_is_set_status(sc, status,
   9641 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9642 			/*
   9643 			 * Note: we don't know if this was TCP or UDP,
   9644 			 * so we just set both bits, and expect the
   9645 			 * upper layers to deal.
   9646 			 */
   9647 			WM_Q_EVCNT_INCR(rxq, tusum);
   9648 			m->m_pkthdr.csum_flags |=
   9649 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9650 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9651 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9652 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9653 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9654 		}
   9655 	}
   9656 }
   9657 
   9658 /*
   9659  * wm_rxeof:
   9660  *
   9661  *	Helper; handle receive interrupts.
   9662  */
   9663 static bool
   9664 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9665 {
   9666 	struct wm_softc *sc = rxq->rxq_sc;
   9667 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9668 	struct wm_rxsoft *rxs;
   9669 	struct mbuf *m;
   9670 	int i, len;
   9671 	int count = 0;
   9672 	uint32_t status, errors;
   9673 	uint16_t vlantag;
   9674 	bool more = false;
   9675 
   9676 	KASSERT(mutex_owned(rxq->rxq_lock));
   9677 
   9678 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9679 		rxs = &rxq->rxq_soft[i];
   9680 
   9681 		DPRINTF(sc, WM_DEBUG_RX,
   9682 		    ("%s: RX: checking descriptor %d\n",
   9683 			device_xname(sc->sc_dev), i));
   9684 		wm_cdrxsync(rxq, i,
   9685 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9686 
   9687 		status = wm_rxdesc_get_status(rxq, i);
   9688 		errors = wm_rxdesc_get_errors(rxq, i);
   9689 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9690 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9691 #ifdef WM_DEBUG
   9692 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9693 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9694 #endif
   9695 
   9696 		if (!wm_rxdesc_dd(rxq, i, status))
   9697 			break;
   9698 
   9699 		if (limit-- == 0) {
   9700 			more = true;
   9701 			DPRINTF(sc, WM_DEBUG_RX,
   9702 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9703 				device_xname(sc->sc_dev), i));
   9704 			break;
   9705 		}
   9706 
   9707 		count++;
   9708 		if (__predict_false(rxq->rxq_discard)) {
   9709 			DPRINTF(sc, WM_DEBUG_RX,
   9710 			    ("%s: RX: discarding contents of descriptor %d\n",
   9711 				device_xname(sc->sc_dev), i));
   9712 			wm_init_rxdesc(rxq, i);
   9713 			if (wm_rxdesc_is_eop(rxq, status)) {
   9714 				/* Reset our state. */
   9715 				DPRINTF(sc, WM_DEBUG_RX,
   9716 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9717 					device_xname(sc->sc_dev)));
   9718 				rxq->rxq_discard = 0;
   9719 			}
   9720 			continue;
   9721 		}
   9722 
   9723 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9724 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9725 
   9726 		m = rxs->rxs_mbuf;
   9727 
   9728 		/*
   9729 		 * Add a new receive buffer to the ring, unless of
   9730 		 * course the length is zero. Treat the latter as a
   9731 		 * failed mapping.
   9732 		 */
   9733 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9734 			/*
   9735 			 * Failed, throw away what we've done so
   9736 			 * far, and discard the rest of the packet.
   9737 			 */
   9738 			if_statinc(ifp, if_ierrors);
   9739 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9740 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9741 			wm_init_rxdesc(rxq, i);
   9742 			if (!wm_rxdesc_is_eop(rxq, status))
   9743 				rxq->rxq_discard = 1;
   9744 			if (rxq->rxq_head != NULL)
   9745 				m_freem(rxq->rxq_head);
   9746 			WM_RXCHAIN_RESET(rxq);
   9747 			DPRINTF(sc, WM_DEBUG_RX,
   9748 			    ("%s: RX: Rx buffer allocation failed, "
   9749 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9750 				rxq->rxq_discard ? " (discard)" : ""));
   9751 			continue;
   9752 		}
   9753 
   9754 		m->m_len = len;
   9755 		rxq->rxq_len += len;
   9756 		DPRINTF(sc, WM_DEBUG_RX,
   9757 		    ("%s: RX: buffer at %p len %d\n",
   9758 			device_xname(sc->sc_dev), m->m_data, len));
   9759 
   9760 		/* If this is not the end of the packet, keep looking. */
   9761 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9762 			WM_RXCHAIN_LINK(rxq, m);
   9763 			DPRINTF(sc, WM_DEBUG_RX,
   9764 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9765 				device_xname(sc->sc_dev), rxq->rxq_len));
   9766 			continue;
   9767 		}
   9768 
    9769 		/*
    9770 		 * Okay, we have the entire packet now. The chip is
    9771 		 * configured to include the FCS except on the I35[04] and
    9772 		 * I21[01] (not all chips can be configured to strip it), so
    9773 		 * we normally need to trim it. Those chips have an erratum:
    9774 		 * the RCTL_SECRC bit in the RCTL register is always set, so
    9775 		 * on them we don't trim it. PCH2 and newer chips also don't
    9776 		 * include the FCS when jumbo frames are used, to work around
    9777 		 * an erratum. We may need to adjust the length of the
    9778 		 * previous mbuf in the chain if the current mbuf is too short.
    9779 		 */
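         		/*
         		 * For example, with a 4-byte FCS and only 2 bytes of it in
         		 * this final mbuf, the other 2 bytes sit at the end of the
         		 * previous mbuf: shorten that one by 2 and set this mbuf's
         		 * length to 0.
         		 */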
   9780 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9781 			if (m->m_len < ETHER_CRC_LEN) {
   9782 				rxq->rxq_tail->m_len
   9783 				    -= (ETHER_CRC_LEN - m->m_len);
   9784 				m->m_len = 0;
   9785 			} else
   9786 				m->m_len -= ETHER_CRC_LEN;
   9787 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9788 		} else
   9789 			len = rxq->rxq_len;
   9790 
   9791 		WM_RXCHAIN_LINK(rxq, m);
   9792 
   9793 		*rxq->rxq_tailp = NULL;
   9794 		m = rxq->rxq_head;
   9795 
   9796 		WM_RXCHAIN_RESET(rxq);
   9797 
   9798 		DPRINTF(sc, WM_DEBUG_RX,
   9799 		    ("%s: RX: have entire packet, len -> %d\n",
   9800 			device_xname(sc->sc_dev), len));
   9801 
   9802 		/* If an error occurred, update stats and drop the packet. */
   9803 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9804 			m_freem(m);
   9805 			continue;
   9806 		}
   9807 
   9808 		/* No errors.  Receive the packet. */
   9809 		m_set_rcvif(m, ifp);
   9810 		m->m_pkthdr.len = len;
   9811 		/*
   9812 		 * TODO
    9813 		 * We should save the rsshash and rsstype in this mbuf.
   9814 		 */
   9815 		DPRINTF(sc, WM_DEBUG_RX,
   9816 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9817 			device_xname(sc->sc_dev), rsstype, rsshash));
   9818 
   9819 		/*
   9820 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9821 		 * for us.  Associate the tag with the packet.
   9822 		 */
   9823 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9824 			continue;
   9825 
   9826 		/* Set up checksum info for this packet. */
   9827 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9828 
   9829 		rxq->rxq_packets++;
   9830 		rxq->rxq_bytes += len;
   9831 		/* Pass it on. */
   9832 		if_percpuq_enqueue(sc->sc_ipq, m);
   9833 
   9834 		if (rxq->rxq_stopping)
   9835 			break;
   9836 	}
   9837 	rxq->rxq_ptr = i;
   9838 
   9839 	if (count != 0)
   9840 		rnd_add_uint32(&sc->rnd_source, count);
   9841 
   9842 	DPRINTF(sc, WM_DEBUG_RX,
   9843 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9844 
   9845 	return more;
   9846 }
   9847 
   9848 /*
   9849  * wm_linkintr_gmii:
   9850  *
   9851  *	Helper; handle link interrupts for GMII.
   9852  */
   9853 static void
   9854 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9855 {
   9856 	device_t dev = sc->sc_dev;
   9857 	uint32_t status, reg;
   9858 	bool link;
   9859 	int rv;
   9860 
   9861 	KASSERT(WM_CORE_LOCKED(sc));
   9862 
   9863 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9864 		__func__));
   9865 
   9866 	if ((icr & ICR_LSC) == 0) {
   9867 		if (icr & ICR_RXSEQ)
   9868 			DPRINTF(sc, WM_DEBUG_LINK,
   9869 			    ("%s: LINK Receive sequence error\n",
   9870 				device_xname(dev)));
   9871 		return;
   9872 	}
   9873 
   9874 	/* Link status changed */
   9875 	status = CSR_READ(sc, WMREG_STATUS);
   9876 	link = status & STATUS_LU;
   9877 	if (link) {
   9878 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9879 			device_xname(dev),
   9880 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9881 		if (wm_phy_need_linkdown_discard(sc)) {
   9882 			DPRINTF(sc, WM_DEBUG_LINK,
   9883 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9884 				device_xname(dev)));
   9885 			wm_clear_linkdown_discard(sc);
   9886 		}
   9887 	} else {
   9888 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9889 			device_xname(dev)));
   9890 		if (wm_phy_need_linkdown_discard(sc)) {
   9891 			DPRINTF(sc, WM_DEBUG_LINK,
   9892 			    ("%s: linkintr: Set linkdown discard flag\n",
   9893 				device_xname(dev)));
   9894 			wm_set_linkdown_discard(sc);
   9895 		}
   9896 	}
   9897 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9898 		wm_gig_downshift_workaround_ich8lan(sc);
   9899 
   9900 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   9901 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9902 
   9903 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9904 		device_xname(dev)));
   9905 	mii_pollstat(&sc->sc_mii);
   9906 	if (sc->sc_type == WM_T_82543) {
   9907 		int miistatus, active;
   9908 
   9909 		/*
    9910 		 * With the 82543, we need to force the MAC's speed
    9911 		 * and duplex to match the PHY's speed and duplex
    9912 		 * configuration.
   9913 		 */
   9914 		miistatus = sc->sc_mii.mii_media_status;
   9915 
   9916 		if (miistatus & IFM_ACTIVE) {
   9917 			active = sc->sc_mii.mii_media_active;
   9918 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9919 			switch (IFM_SUBTYPE(active)) {
   9920 			case IFM_10_T:
   9921 				sc->sc_ctrl |= CTRL_SPEED_10;
   9922 				break;
   9923 			case IFM_100_TX:
   9924 				sc->sc_ctrl |= CTRL_SPEED_100;
   9925 				break;
   9926 			case IFM_1000_T:
   9927 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9928 				break;
   9929 			default:
   9930 				/*
   9931 				 * Fiber?
    9932 				 * Should not enter here.
   9933 				 */
   9934 				device_printf(dev, "unknown media (%x)\n",
   9935 				    active);
   9936 				break;
   9937 			}
   9938 			if (active & IFM_FDX)
   9939 				sc->sc_ctrl |= CTRL_FD;
   9940 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9941 		}
   9942 	} else if (sc->sc_type == WM_T_PCH) {
   9943 		wm_k1_gig_workaround_hv(sc,
   9944 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9945 	}
   9946 
   9947 	/*
   9948 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9949 	 * aggressive resulting in many collisions. To avoid this, increase
   9950 	 * the IPG and reduce Rx latency in the PHY.
   9951 	 */
   9952 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9953 	    && link) {
   9954 		uint32_t tipg_reg;
   9955 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9956 		bool fdx;
   9957 		uint16_t emi_addr, emi_val;
   9958 
   9959 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9960 		tipg_reg &= ~TIPG_IPGT_MASK;
   9961 		fdx = status & STATUS_FD;
   9962 
   9963 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9964 			tipg_reg |= 0xff;
   9965 			/* Reduce Rx latency in analog PHY */
   9966 			emi_val = 0;
   9967 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9968 		    fdx && speed != STATUS_SPEED_1000) {
   9969 			tipg_reg |= 0xc;
   9970 			emi_val = 1;
   9971 		} else {
   9972 			/* Roll back the default values */
   9973 			tipg_reg |= 0x08;
   9974 			emi_val = 1;
   9975 		}
   9976 
   9977 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9978 
   9979 		rv = sc->phy.acquire(sc);
   9980 		if (rv)
   9981 			return;
   9982 
   9983 		if (sc->sc_type == WM_T_PCH2)
   9984 			emi_addr = I82579_RX_CONFIG;
   9985 		else
   9986 			emi_addr = I217_RX_CONFIG;
   9987 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9988 
   9989 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9990 			uint16_t phy_reg;
   9991 
   9992 			sc->phy.readreg_locked(dev, 2,
   9993 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9994 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9995 			if (speed == STATUS_SPEED_100
   9996 			    || speed == STATUS_SPEED_10)
   9997 				phy_reg |= 0x3e8;
   9998 			else
   9999 				phy_reg |= 0xfa;
   10000 			sc->phy.writereg_locked(dev, 2,
   10001 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10002 
   10003 			if (speed == STATUS_SPEED_1000) {
   10004 				sc->phy.readreg_locked(dev, 2,
   10005 				    HV_PM_CTRL, &phy_reg);
   10006 
   10007 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10008 
   10009 				sc->phy.writereg_locked(dev, 2,
   10010 				    HV_PM_CTRL, phy_reg);
   10011 			}
   10012 		}
   10013 		sc->phy.release(sc);
   10014 
   10015 		if (rv)
   10016 			return;
   10017 
   10018 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10019 			uint16_t data, ptr_gap;
   10020 
   10021 			if (speed == STATUS_SPEED_1000) {
   10022 				rv = sc->phy.acquire(sc);
   10023 				if (rv)
   10024 					return;
   10025 
   10026 				rv = sc->phy.readreg_locked(dev, 2,
   10027 				    I82579_UNKNOWN1, &data);
   10028 				if (rv) {
   10029 					sc->phy.release(sc);
   10030 					return;
   10031 				}
   10032 
   10033 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10034 				if (ptr_gap < 0x18) {
   10035 					data &= ~(0x3ff << 2);
   10036 					data |= (0x18 << 2);
   10037 					rv = sc->phy.writereg_locked(dev,
   10038 					    2, I82579_UNKNOWN1, data);
   10039 				}
   10040 				sc->phy.release(sc);
   10041 				if (rv)
   10042 					return;
   10043 			} else {
   10044 				rv = sc->phy.acquire(sc);
   10045 				if (rv)
   10046 					return;
   10047 
   10048 				rv = sc->phy.writereg_locked(dev, 2,
   10049 				    I82579_UNKNOWN1, 0xc023);
   10050 				sc->phy.release(sc);
   10051 				if (rv)
   10052 					return;
   10053 
   10054 			}
   10055 		}
   10056 	}
   10057 
   10058 	/*
   10059 	 * I217 Packet Loss issue:
   10060 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   10061 	 * on power up.
   10062 	 * Set the Beacon Duration for I217 to 8 usec
   10063 	 */
   10064 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10065 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10066 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10067 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10068 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10069 	}
   10070 
   10071 	/* Work-around I218 hang issue */
   10072 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10073 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10074 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10075 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10076 		wm_k1_workaround_lpt_lp(sc, link);
   10077 
   10078 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10079 		/*
   10080 		 * Set platform power management values for Latency
   10081 		 * Tolerance Reporting (LTR)
   10082 		 */
   10083 		wm_platform_pm_pch_lpt(sc,
   10084 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10085 	}
   10086 
   10087 	/* Clear link partner's EEE ability */
   10088 	sc->eee_lp_ability = 0;
   10089 
   10090 	/* FEXTNVM6 K1-off workaround */
   10091 	if (sc->sc_type == WM_T_PCH_SPT) {
   10092 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10093 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10094 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10095 		else
   10096 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10097 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10098 	}
   10099 
   10100 	if (!link)
   10101 		return;
   10102 
   10103 	switch (sc->sc_type) {
   10104 	case WM_T_PCH2:
   10105 		wm_k1_workaround_lv(sc);
   10106 		/* FALLTHROUGH */
   10107 	case WM_T_PCH:
   10108 		if (sc->sc_phytype == WMPHY_82578)
   10109 			wm_link_stall_workaround_hv(sc);
   10110 		break;
   10111 	default:
   10112 		break;
   10113 	}
   10114 
   10115 	/* Enable/Disable EEE after link up */
   10116 	if (sc->sc_phytype > WMPHY_82579)
   10117 		wm_set_eee_pchlan(sc);
   10118 }
   10119 
   10120 /*
   10121  * wm_linkintr_tbi:
   10122  *
   10123  *	Helper; handle link interrupts for TBI mode.
   10124  */
   10125 static void
   10126 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10127 {
   10128 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10129 	uint32_t status;
   10130 
   10131 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10132 		__func__));
   10133 
   10134 	status = CSR_READ(sc, WMREG_STATUS);
   10135 	if (icr & ICR_LSC) {
   10136 		wm_check_for_link(sc);
   10137 		if (status & STATUS_LU) {
   10138 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10139 				device_xname(sc->sc_dev),
   10140 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10141 			/*
    10142 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    10143 			 * automatically, so re-read it to update sc->sc_ctrl.
   10144 			 */
   10145 
   10146 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10147 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10148 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10149 			if (status & STATUS_FD)
   10150 				sc->sc_tctl |=
   10151 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10152 			else
   10153 				sc->sc_tctl |=
   10154 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10155 			if (sc->sc_ctrl & CTRL_TFCE)
   10156 				sc->sc_fcrtl |= FCRTL_XONE;
   10157 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10158 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10159 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10160 			sc->sc_tbi_linkup = 1;
   10161 			if_link_state_change(ifp, LINK_STATE_UP);
   10162 		} else {
   10163 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10164 				device_xname(sc->sc_dev)));
   10165 			sc->sc_tbi_linkup = 0;
   10166 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10167 		}
   10168 		/* Update LED */
   10169 		wm_tbi_serdes_set_linkled(sc);
   10170 	} else if (icr & ICR_RXSEQ)
   10171 		DPRINTF(sc, WM_DEBUG_LINK,
   10172 		    ("%s: LINK: Receive sequence error\n",
   10173 			device_xname(sc->sc_dev)));
   10174 }
   10175 
   10176 /*
   10177  * wm_linkintr_serdes:
   10178  *
    10179  *	Helper; handle link interrupts for SERDES mode.
   10180  */
   10181 static void
   10182 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10183 {
   10184 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10185 	struct mii_data *mii = &sc->sc_mii;
   10186 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10187 	uint32_t pcs_adv, pcs_lpab, reg;
   10188 
   10189 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10190 		__func__));
   10191 
   10192 	if (icr & ICR_LSC) {
   10193 		/* Check PCS */
   10194 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10195 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10196 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10197 				device_xname(sc->sc_dev)));
   10198 			mii->mii_media_status |= IFM_ACTIVE;
   10199 			sc->sc_tbi_linkup = 1;
   10200 			if_link_state_change(ifp, LINK_STATE_UP);
   10201 		} else {
   10202 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10203 				device_xname(sc->sc_dev)));
   10204 			mii->mii_media_status |= IFM_NONE;
   10205 			sc->sc_tbi_linkup = 0;
   10206 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10207 			wm_tbi_serdes_set_linkled(sc);
   10208 			return;
   10209 		}
   10210 		mii->mii_media_active |= IFM_1000_SX;
   10211 		if ((reg & PCS_LSTS_FDX) != 0)
   10212 			mii->mii_media_active |= IFM_FDX;
   10213 		else
   10214 			mii->mii_media_active |= IFM_HDX;
   10215 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10216 			/* Check flow */
   10217 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10218 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10219 				DPRINTF(sc, WM_DEBUG_LINK,
   10220 				    ("XXX LINKOK but not ACOMP\n"));
   10221 				return;
   10222 			}
   10223 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10224 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10225 			DPRINTF(sc, WM_DEBUG_LINK,
   10226 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
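          			/*
          			 * Resolve flow control as in IEEE 802.3 Annex 28B:
          			 * symmetric pause on both sides enables both
          			 * directions; otherwise the asymmetric-pause bits
          			 * select a single direction, if any.
          			 */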
   10227 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10228 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10229 				mii->mii_media_active |= IFM_FLOW
   10230 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10231 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10232 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10233 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10234 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10235 				mii->mii_media_active |= IFM_FLOW
   10236 				    | IFM_ETH_TXPAUSE;
   10237 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10238 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10239 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10240 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10241 				mii->mii_media_active |= IFM_FLOW
   10242 				    | IFM_ETH_RXPAUSE;
   10243 		}
   10244 		/* Update LED */
   10245 		wm_tbi_serdes_set_linkled(sc);
   10246 	} else
   10247 		DPRINTF(sc, WM_DEBUG_LINK,
   10248 		    ("%s: LINK: Receive sequence error\n",
   10249 		    device_xname(sc->sc_dev)));
   10250 }
   10251 
   10252 /*
   10253  * wm_linkintr:
   10254  *
   10255  *	Helper; handle link interrupts.
   10256  */
   10257 static void
   10258 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10259 {
   10260 
   10261 	KASSERT(WM_CORE_LOCKED(sc));
   10262 
   10263 	if (sc->sc_flags & WM_F_HAS_MII)
   10264 		wm_linkintr_gmii(sc, icr);
   10265 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10266 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10267 		wm_linkintr_serdes(sc, icr);
   10268 	else
   10269 		wm_linkintr_tbi(sc, icr);
   10270 }
   10271 
   10272 
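/*
 * Defer the rest of a queue's Tx/Rx processing either to the per-device
 * workqueue or to a softint, depending on the current policy.
 */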
   10273 static inline void
   10274 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10275 {
   10276 
   10277 	if (wmq->wmq_txrx_use_workqueue)
   10278 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   10279 	else
   10280 		softint_schedule(wmq->wmq_si);
   10281 }
   10282 
   10283 static inline void
   10284 wm_legacy_intr_disable(struct wm_softc *sc)
   10285 {
   10286 
   10287 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10288 }
   10289 
   10290 static inline void
   10291 wm_legacy_intr_enable(struct wm_softc *sc)
   10292 {
   10293 
   10294 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10295 }
   10296 
   10297 /*
   10298  * wm_intr_legacy:
   10299  *
   10300  *	Interrupt service routine for INTx and MSI.
   10301  */
   10302 static int
   10303 wm_intr_legacy(void *arg)
   10304 {
   10305 	struct wm_softc *sc = arg;
   10306 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10307 	struct wm_queue *wmq = &sc->sc_queue[0];
   10308 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10309 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10310 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10311 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10312 	uint32_t icr, rndval = 0;
   10313 	bool more = false;
   10314 
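          	/*
          	 * Reading ICR acknowledges (clears) the asserted interrupt
          	 * causes on these devices. If none of the causes we enabled
          	 * is set, the interrupt was not ours.
          	 */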
   10315 	icr = CSR_READ(sc, WMREG_ICR);
   10316 	if ((icr & sc->sc_icr) == 0)
   10317 		return 0;
   10318 
   10319 	DPRINTF(sc, WM_DEBUG_TX,
    10320 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   10321 	if (rndval == 0)
   10322 		rndval = icr;
   10323 
   10324 	mutex_enter(txq->txq_lock);
   10325 
   10326 	if (txq->txq_stopping) {
   10327 		mutex_exit(txq->txq_lock);
   10328 		return 1;
   10329 	}
   10330 
   10331 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10332 	if (icr & ICR_TXDW) {
   10333 		DPRINTF(sc, WM_DEBUG_TX,
   10334 		    ("%s: TX: got TXDW interrupt\n",
   10335 			device_xname(sc->sc_dev)));
   10336 		WM_Q_EVCNT_INCR(txq, txdw);
   10337 	}
   10338 #endif
   10339 	if (txlimit > 0) {
   10340 		more |= wm_txeof(txq, txlimit);
   10341 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10342 			more = true;
   10343 	} else
   10344 		more = true;
   10345 	mutex_exit(txq->txq_lock);
   10346 
   10347 	mutex_enter(rxq->rxq_lock);
   10348 
   10349 	if (rxq->rxq_stopping) {
   10350 		mutex_exit(rxq->rxq_lock);
   10351 		return 1;
   10352 	}
   10353 
   10354 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10355 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10356 		DPRINTF(sc, WM_DEBUG_RX,
   10357 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10358 			device_xname(sc->sc_dev),
   10359 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10360 		WM_Q_EVCNT_INCR(rxq, intr);
   10361 	}
   10362 #endif
   10363 	if (rxlimit > 0) {
   10364 		/*
   10365 		 * wm_rxeof() does *not* call upper layer functions directly,
    10366 		 * as if_percpuq_enqueue() just calls softint_schedule(),
    10367 		 * so we can call wm_rxeof() in interrupt context.
   10368 		 */
    10369 		more |= wm_rxeof(rxq, rxlimit);
   10370 	} else
   10371 		more = true;
   10372 
   10373 	mutex_exit(rxq->rxq_lock);
   10374 
   10375 	WM_CORE_LOCK(sc);
   10376 
   10377 	if (sc->sc_core_stopping) {
   10378 		WM_CORE_UNLOCK(sc);
   10379 		return 1;
   10380 	}
   10381 
   10382 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10383 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10384 		wm_linkintr(sc, icr);
   10385 	}
   10386 	if ((icr & ICR_GPI(0)) != 0)
   10387 		device_printf(sc->sc_dev, "got module interrupt\n");
   10388 
   10389 	WM_CORE_UNLOCK(sc);
   10390 
   10391 	if (icr & ICR_RXO) {
   10392 #if defined(WM_DEBUG)
   10393 		log(LOG_WARNING, "%s: Receive overrun\n",
   10394 		    device_xname(sc->sc_dev));
   10395 #endif /* defined(WM_DEBUG) */
   10396 	}
   10397 
   10398 	rnd_add_uint32(&sc->rnd_source, rndval);
   10399 
   10400 	if (more) {
   10401 		/* Try to get more packets going. */
   10402 		wm_legacy_intr_disable(sc);
   10403 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10404 		wm_sched_handle_queue(sc, wmq);
   10405 	}
   10406 
   10407 	return 1;
   10408 }
   10409 
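/*
 * Mask or unmask the Tx/Rx interrupts for one queue pair. Three schemes
 * are in use: the 82574 uses IMC/IMS with per-queue ICR_TXQ/ICR_RXQ bits,
 * the 82575 uses EIMC/EIMS with the EITR_{TX,RX}_QUEUE() encoding, and
 * newer MSI-X devices use one EIMC/EIMS bit per vector.
 */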
   10410 static inline void
   10411 wm_txrxintr_disable(struct wm_queue *wmq)
   10412 {
   10413 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10414 
   10415 	if (__predict_false(!wm_is_using_msix(sc))) {
   10416 		wm_legacy_intr_disable(sc);
   10417 		return;
   10418 	}
   10419 
   10420 	if (sc->sc_type == WM_T_82574)
   10421 		CSR_WRITE(sc, WMREG_IMC,
   10422 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10423 	else if (sc->sc_type == WM_T_82575)
   10424 		CSR_WRITE(sc, WMREG_EIMC,
   10425 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10426 	else
   10427 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10428 }
   10429 
   10430 static inline void
   10431 wm_txrxintr_enable(struct wm_queue *wmq)
   10432 {
   10433 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10434 
   10435 	wm_itrs_calculate(sc, wmq);
   10436 
   10437 	if (__predict_false(!wm_is_using_msix(sc))) {
   10438 		wm_legacy_intr_enable(sc);
   10439 		return;
   10440 	}
   10441 
    10442 	/*
    10443 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    10444 	 * here. There is no need to care which of RXQ(0) and RXQ(1) enables
    10445 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
    10446 	 * its wm_handle_queue(wmq) is running.
    10447 	 */
   10448 	if (sc->sc_type == WM_T_82574)
   10449 		CSR_WRITE(sc, WMREG_IMS,
   10450 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10451 	else if (sc->sc_type == WM_T_82575)
   10452 		CSR_WRITE(sc, WMREG_EIMS,
   10453 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10454 	else
   10455 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10456 }
   10457 
   10458 static int
   10459 wm_txrxintr_msix(void *arg)
   10460 {
   10461 	struct wm_queue *wmq = arg;
   10462 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10463 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10464 	struct wm_softc *sc = txq->txq_sc;
   10465 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10466 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10467 	bool txmore;
   10468 	bool rxmore;
   10469 
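          	/*
          	 * Each queue pair's Tx and Rx share one MSI-X vector; its
          	 * index equals the queue ID.
          	 */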
   10470 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10471 
   10472 	DPRINTF(sc, WM_DEBUG_TX,
   10473 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10474 
   10475 	wm_txrxintr_disable(wmq);
   10476 
   10477 	mutex_enter(txq->txq_lock);
   10478 
   10479 	if (txq->txq_stopping) {
   10480 		mutex_exit(txq->txq_lock);
   10481 		return 1;
   10482 	}
   10483 
   10484 	WM_Q_EVCNT_INCR(txq, txdw);
   10485 	if (txlimit > 0) {
   10486 		txmore = wm_txeof(txq, txlimit);
   10487 		/* wm_deferred start() is done in wm_handle_queue(). */
   10488 	} else
   10489 		txmore = true;
   10490 	mutex_exit(txq->txq_lock);
   10491 
   10492 	DPRINTF(sc, WM_DEBUG_RX,
   10493 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10494 	mutex_enter(rxq->rxq_lock);
   10495 
   10496 	if (rxq->rxq_stopping) {
   10497 		mutex_exit(rxq->rxq_lock);
   10498 		return 1;
   10499 	}
   10500 
   10501 	WM_Q_EVCNT_INCR(rxq, intr);
   10502 	if (rxlimit > 0) {
   10503 		rxmore = wm_rxeof(rxq, rxlimit);
   10504 	} else
   10505 		rxmore = true;
   10506 	mutex_exit(rxq->rxq_lock);
   10507 
   10508 	wm_itrs_writereg(sc, wmq);
   10509 
   10510 	if (txmore || rxmore) {
   10511 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10512 		wm_sched_handle_queue(sc, wmq);
   10513 	} else
   10514 		wm_txrxintr_enable(wmq);
   10515 
   10516 	return 1;
   10517 }
   10518 
   10519 static void
   10520 wm_handle_queue(void *arg)
   10521 {
   10522 	struct wm_queue *wmq = arg;
   10523 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10524 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10525 	struct wm_softc *sc = txq->txq_sc;
   10526 	u_int txlimit = sc->sc_tx_process_limit;
   10527 	u_int rxlimit = sc->sc_rx_process_limit;
   10528 	bool txmore;
   10529 	bool rxmore;
   10530 
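          	/*
          	 * We run in softint or workqueue context here, so we use the
          	 * deferred process limits, which are tuned separately from the
          	 * interrupt-time limits used in wm_txrxintr_msix().
          	 */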
   10531 	mutex_enter(txq->txq_lock);
   10532 	if (txq->txq_stopping) {
   10533 		mutex_exit(txq->txq_lock);
   10534 		return;
   10535 	}
   10536 	txmore = wm_txeof(txq, txlimit);
   10537 	wm_deferred_start_locked(txq);
   10538 	mutex_exit(txq->txq_lock);
   10539 
   10540 	mutex_enter(rxq->rxq_lock);
   10541 	if (rxq->rxq_stopping) {
   10542 		mutex_exit(rxq->rxq_lock);
   10543 		return;
   10544 	}
   10545 	WM_Q_EVCNT_INCR(rxq, defer);
   10546 	rxmore = wm_rxeof(rxq, rxlimit);
   10547 	mutex_exit(rxq->rxq_lock);
   10548 
   10549 	if (txmore || rxmore) {
   10550 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10551 		wm_sched_handle_queue(sc, wmq);
   10552 	} else
   10553 		wm_txrxintr_enable(wmq);
   10554 }
   10555 
   10556 static void
   10557 wm_handle_queue_work(struct work *wk, void *context)
   10558 {
   10559 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10560 
   10561 	/*
    10562 	 * An "enqueued" flag is not required here.
   10563 	 */
   10564 	wm_handle_queue(wmq);
   10565 }
   10566 
   10567 /*
   10568  * wm_linkintr_msix:
   10569  *
   10570  *	Interrupt service routine for link status change for MSI-X.
   10571  */
   10572 static int
   10573 wm_linkintr_msix(void *arg)
   10574 {
   10575 	struct wm_softc *sc = arg;
   10576 	uint32_t reg;
   10577 	bool has_rxo;
   10578 
   10579 	reg = CSR_READ(sc, WMREG_ICR);
   10580 	WM_CORE_LOCK(sc);
   10581 	DPRINTF(sc, WM_DEBUG_LINK,
   10582 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10583 		device_xname(sc->sc_dev), reg));
   10584 
   10585 	if (sc->sc_core_stopping)
   10586 		goto out;
   10587 
   10588 	if ((reg & ICR_LSC) != 0) {
   10589 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10590 		wm_linkintr(sc, ICR_LSC);
   10591 	}
   10592 	if ((reg & ICR_GPI(0)) != 0)
   10593 		device_printf(sc->sc_dev, "got module interrupt\n");
   10594 
    10595 	/*
    10596 	 * XXX 82574 MSI-X mode workaround
    10597 	 *
    10598 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    10599 	 * MSI-X vector and triggers neither the ICR_RXQ(0) nor the ICR_RXQ(1)
    10600 	 * vector. So we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts
    10601 	 * ourselves by writing WMREG_ICS, to process the received packets.
    10602 	 */
   10603 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10604 #if defined(WM_DEBUG)
   10605 		log(LOG_WARNING, "%s: Receive overrun\n",
   10606 		    device_xname(sc->sc_dev));
   10607 #endif /* defined(WM_DEBUG) */
   10608 
   10609 		has_rxo = true;
    10610 		/*
    10611 		 * The RXO interrupt fires at a very high rate when receive
    10612 		 * traffic is heavy, so we handle ICR_OTHER in polling mode as
    10613 		 * we do the Tx/Rx interrupts. ICR_OTHER will be re-enabled at
    10614 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    10615 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    10616 		 */
   10617 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10618 
   10619 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10620 	}
   10621 
   10622 
   10623 
   10624 out:
   10625 	WM_CORE_UNLOCK(sc);
   10626 
   10627 	if (sc->sc_type == WM_T_82574) {
   10628 		if (!has_rxo)
   10629 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10630 		else
   10631 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10632 	} else if (sc->sc_type == WM_T_82575)
   10633 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10634 	else
   10635 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10636 
   10637 	return 1;
   10638 }
   10639 
   10640 /*
   10641  * Media related.
   10642  * GMII, SGMII, TBI (and SERDES)
   10643  */
   10644 
   10645 /* Common */
   10646 
   10647 /*
   10648  * wm_tbi_serdes_set_linkled:
   10649  *
   10650  *	Update the link LED on TBI and SERDES devices.
   10651  */
   10652 static void
   10653 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10654 {
   10655 
   10656 	if (sc->sc_tbi_linkup)
   10657 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10658 	else
   10659 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10660 
   10661 	/* 82540 or newer devices are active low */
   10662 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10663 
   10664 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10665 }
   10666 
   10667 /* GMII related */
   10668 
   10669 /*
   10670  * wm_gmii_reset:
   10671  *
   10672  *	Reset the PHY.
   10673  */
   10674 static void
   10675 wm_gmii_reset(struct wm_softc *sc)
   10676 {
   10677 	uint32_t reg;
   10678 	int rv;
   10679 
   10680 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10681 		device_xname(sc->sc_dev), __func__));
   10682 
   10683 	rv = sc->phy.acquire(sc);
   10684 	if (rv != 0) {
   10685 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10686 		    __func__);
   10687 		return;
   10688 	}
   10689 
   10690 	switch (sc->sc_type) {
   10691 	case WM_T_82542_2_0:
   10692 	case WM_T_82542_2_1:
   10693 		/* null */
   10694 		break;
   10695 	case WM_T_82543:
   10696 		/*
    10697 		 * With the 82543, we need to force the MAC's speed and duplex
    10698 		 * to match the PHY's speed and duplex configuration.
   10699 		 * In addition, we need to perform a hardware reset on the PHY
   10700 		 * to take it out of reset.
   10701 		 */
   10702 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10703 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10704 
   10705 		/* The PHY reset pin is active-low. */
   10706 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10707 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10708 		    CTRL_EXT_SWDPIN(4));
   10709 		reg |= CTRL_EXT_SWDPIO(4);
   10710 
   10711 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10712 		CSR_WRITE_FLUSH(sc);
   10713 		delay(10*1000);
   10714 
   10715 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10716 		CSR_WRITE_FLUSH(sc);
   10717 		delay(150);
   10718 #if 0
   10719 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10720 #endif
   10721 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10722 		break;
   10723 	case WM_T_82544:	/* Reset 10000us */
   10724 	case WM_T_82540:
   10725 	case WM_T_82545:
   10726 	case WM_T_82545_3:
   10727 	case WM_T_82546:
   10728 	case WM_T_82546_3:
   10729 	case WM_T_82541:
   10730 	case WM_T_82541_2:
   10731 	case WM_T_82547:
   10732 	case WM_T_82547_2:
   10733 	case WM_T_82571:	/* Reset 100us */
   10734 	case WM_T_82572:
   10735 	case WM_T_82573:
   10736 	case WM_T_82574:
   10737 	case WM_T_82575:
   10738 	case WM_T_82576:
   10739 	case WM_T_82580:
   10740 	case WM_T_I350:
   10741 	case WM_T_I354:
   10742 	case WM_T_I210:
   10743 	case WM_T_I211:
   10744 	case WM_T_82583:
   10745 	case WM_T_80003:
   10746 		/* Generic reset */
   10747 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10748 		CSR_WRITE_FLUSH(sc);
   10749 		delay(20000);
   10750 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10751 		CSR_WRITE_FLUSH(sc);
   10752 		delay(20000);
   10753 
   10754 		if ((sc->sc_type == WM_T_82541)
   10755 		    || (sc->sc_type == WM_T_82541_2)
   10756 		    || (sc->sc_type == WM_T_82547)
   10757 		    || (sc->sc_type == WM_T_82547_2)) {
    10758 			/* Workarounds for IGP are done in igp_reset() */
   10759 			/* XXX add code to set LED after phy reset */
   10760 		}
   10761 		break;
   10762 	case WM_T_ICH8:
   10763 	case WM_T_ICH9:
   10764 	case WM_T_ICH10:
   10765 	case WM_T_PCH:
   10766 	case WM_T_PCH2:
   10767 	case WM_T_PCH_LPT:
   10768 	case WM_T_PCH_SPT:
   10769 	case WM_T_PCH_CNP:
   10770 		/* Generic reset */
   10771 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10772 		CSR_WRITE_FLUSH(sc);
   10773 		delay(100);
   10774 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10775 		CSR_WRITE_FLUSH(sc);
   10776 		delay(150);
   10777 		break;
   10778 	default:
   10779 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10780 		    __func__);
   10781 		break;
   10782 	}
   10783 
   10784 	sc->phy.release(sc);
   10785 
   10786 	/* get_cfg_done */
   10787 	wm_get_cfg_done(sc);
   10788 
   10789 	/* Extra setup */
   10790 	switch (sc->sc_type) {
   10791 	case WM_T_82542_2_0:
   10792 	case WM_T_82542_2_1:
   10793 	case WM_T_82543:
   10794 	case WM_T_82544:
   10795 	case WM_T_82540:
   10796 	case WM_T_82545:
   10797 	case WM_T_82545_3:
   10798 	case WM_T_82546:
   10799 	case WM_T_82546_3:
   10800 	case WM_T_82541_2:
   10801 	case WM_T_82547_2:
   10802 	case WM_T_82571:
   10803 	case WM_T_82572:
   10804 	case WM_T_82573:
   10805 	case WM_T_82574:
   10806 	case WM_T_82583:
   10807 	case WM_T_82575:
   10808 	case WM_T_82576:
   10809 	case WM_T_82580:
   10810 	case WM_T_I350:
   10811 	case WM_T_I354:
   10812 	case WM_T_I210:
   10813 	case WM_T_I211:
   10814 	case WM_T_80003:
   10815 		/* Null */
   10816 		break;
   10817 	case WM_T_82541:
   10818 	case WM_T_82547:
    10819 		/* XXX Configure the LED after PHY reset */
   10820 		break;
   10821 	case WM_T_ICH8:
   10822 	case WM_T_ICH9:
   10823 	case WM_T_ICH10:
   10824 	case WM_T_PCH:
   10825 	case WM_T_PCH2:
   10826 	case WM_T_PCH_LPT:
   10827 	case WM_T_PCH_SPT:
   10828 	case WM_T_PCH_CNP:
   10829 		wm_phy_post_reset(sc);
   10830 		break;
   10831 	default:
   10832 		panic("%s: unknown type\n", __func__);
   10833 		break;
   10834 	}
   10835 }
   10836 
   10837 /*
   10838  * Set up sc_phytype and mii_{read|write}reg.
   10839  *
    10840  *  To identify the PHY type, the correct read/write functions must be
    10841  * selected, and to select them the PCI ID or MAC type must be used
    10842  * without accessing PHY registers.
    10843  *
    10844  *  On the first call of this function, the PHY ID is not known yet, so
    10845  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    10846  * so the result might be incorrect.
    10847  *
    10848  *  On the second call, the PHY OUI and model are used to identify the
    10849  * PHY type. This might still be imperfect because of missing table
    10850  * entries, but it is better than the first guess.
    10851  *
    10852  *  If the newly detected result differs from the previous assumption,
    10853  * a diagnostic message is printed.
   10854  */
   10855 static void
   10856 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10857     uint16_t phy_model)
   10858 {
   10859 	device_t dev = sc->sc_dev;
   10860 	struct mii_data *mii = &sc->sc_mii;
   10861 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10862 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10863 	mii_readreg_t new_readreg;
   10864 	mii_writereg_t new_writereg;
   10865 	bool dodiag = true;
   10866 
   10867 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10868 		device_xname(sc->sc_dev), __func__));
   10869 
   10870 	/*
    10871 	 * A 1000BASE-T SFP uses SGMII, so the first assumed PHY type is
    10872 	 * always incorrect. Don't print diag output on the second call.
   10873 	 */
   10874 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10875 		dodiag = false;
   10876 
   10877 	if (mii->mii_readreg == NULL) {
   10878 		/*
   10879 		 *  This is the first call of this function. For ICH and PCH
   10880 		 * variants, it's difficult to determine the PHY access method
   10881 		 * by sc_type, so use the PCI product ID for some devices.
   10882 		 */
   10883 
   10884 		switch (sc->sc_pcidevid) {
   10885 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10886 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10887 			/* 82577 */
   10888 			new_phytype = WMPHY_82577;
   10889 			break;
   10890 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10891 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10892 			/* 82578 */
   10893 			new_phytype = WMPHY_82578;
   10894 			break;
   10895 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10896 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10897 			/* 82579 */
   10898 			new_phytype = WMPHY_82579;
   10899 			break;
   10900 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10901 		case PCI_PRODUCT_INTEL_82801I_BM:
   10902 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10903 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10904 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10905 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10906 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10907 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10908 			/* ICH8, 9, 10 with 82567 */
   10909 			new_phytype = WMPHY_BM;
   10910 			break;
   10911 		default:
   10912 			break;
   10913 		}
   10914 	} else {
   10915 		/* It's not the first call. Use PHY OUI and model */
   10916 		switch (phy_oui) {
   10917 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10918 			switch (phy_model) {
   10919 			case MII_MODEL_ATTANSIC_AR8021:
   10920 				new_phytype = WMPHY_82578;
   10921 				break;
   10922 			default:
   10923 				break;
   10924 			}
   10925 			break;
   10926 		case MII_OUI_xxMARVELL:
   10927 			switch (phy_model) {
   10928 			case MII_MODEL_xxMARVELL_I210:
   10929 				new_phytype = WMPHY_I210;
   10930 				break;
   10931 			case MII_MODEL_xxMARVELL_E1011:
   10932 			case MII_MODEL_xxMARVELL_E1000_3:
   10933 			case MII_MODEL_xxMARVELL_E1000_5:
   10934 			case MII_MODEL_xxMARVELL_E1112:
   10935 				new_phytype = WMPHY_M88;
   10936 				break;
   10937 			case MII_MODEL_xxMARVELL_E1149:
   10938 				new_phytype = WMPHY_BM;
   10939 				break;
   10940 			case MII_MODEL_xxMARVELL_E1111:
   10941 			case MII_MODEL_xxMARVELL_I347:
   10942 			case MII_MODEL_xxMARVELL_E1512:
   10943 			case MII_MODEL_xxMARVELL_E1340M:
   10944 			case MII_MODEL_xxMARVELL_E1543:
   10945 				new_phytype = WMPHY_M88;
   10946 				break;
   10947 			case MII_MODEL_xxMARVELL_I82563:
   10948 				new_phytype = WMPHY_GG82563;
   10949 				break;
   10950 			default:
   10951 				break;
   10952 			}
   10953 			break;
   10954 		case MII_OUI_INTEL:
   10955 			switch (phy_model) {
   10956 			case MII_MODEL_INTEL_I82577:
   10957 				new_phytype = WMPHY_82577;
   10958 				break;
   10959 			case MII_MODEL_INTEL_I82579:
   10960 				new_phytype = WMPHY_82579;
   10961 				break;
   10962 			case MII_MODEL_INTEL_I217:
   10963 				new_phytype = WMPHY_I217;
   10964 				break;
   10965 			case MII_MODEL_INTEL_I82580:
   10966 				new_phytype = WMPHY_82580;
   10967 				break;
   10968 			case MII_MODEL_INTEL_I350:
   10969 				new_phytype = WMPHY_I350;
   10970 				break;
   10971 			default:
   10972 				break;
   10973 			}
   10974 			break;
   10975 		case MII_OUI_yyINTEL:
   10976 			switch (phy_model) {
   10977 			case MII_MODEL_yyINTEL_I82562G:
   10978 			case MII_MODEL_yyINTEL_I82562EM:
   10979 			case MII_MODEL_yyINTEL_I82562ET:
   10980 				new_phytype = WMPHY_IFE;
   10981 				break;
   10982 			case MII_MODEL_yyINTEL_IGP01E1000:
   10983 				new_phytype = WMPHY_IGP;
   10984 				break;
   10985 			case MII_MODEL_yyINTEL_I82566:
   10986 				new_phytype = WMPHY_IGP_3;
   10987 				break;
   10988 			default:
   10989 				break;
   10990 			}
   10991 			break;
   10992 		default:
   10993 			break;
   10994 		}
   10995 
   10996 		if (dodiag) {
   10997 			if (new_phytype == WMPHY_UNKNOWN)
   10998 				aprint_verbose_dev(dev,
   10999 				    "%s: Unknown PHY model. OUI=%06x, "
   11000 				    "model=%04x\n", __func__, phy_oui,
   11001 				    phy_model);
   11002 
   11003 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11004 			    && (sc->sc_phytype != new_phytype)) {
				aprint_error_dev(dev, "Previously assumed PHY "
				    "type (%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
   11008 			}
   11009 		}
   11010 	}
   11011 
   11012 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11013 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11014 		/* SGMII */
   11015 		new_readreg = wm_sgmii_readreg;
   11016 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11018 		/* BM2 (phyaddr == 1) */
   11019 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11020 		    && (new_phytype != WMPHY_BM)
   11021 		    && (new_phytype != WMPHY_UNKNOWN))
   11022 			doubt_phytype = new_phytype;
   11023 		new_phytype = WMPHY_BM;
   11024 		new_readreg = wm_gmii_bm_readreg;
   11025 		new_writereg = wm_gmii_bm_writereg;
   11026 	} else if (sc->sc_type >= WM_T_PCH) {
   11027 		/* All PCH* use _hv_ */
   11028 		new_readreg = wm_gmii_hv_readreg;
   11029 		new_writereg = wm_gmii_hv_writereg;
   11030 	} else if (sc->sc_type >= WM_T_ICH8) {
   11031 		/* non-82567 ICH8, 9 and 10 */
   11032 		new_readreg = wm_gmii_i82544_readreg;
   11033 		new_writereg = wm_gmii_i82544_writereg;
   11034 	} else if (sc->sc_type >= WM_T_80003) {
   11035 		/* 80003 */
   11036 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11037 		    && (new_phytype != WMPHY_GG82563)
   11038 		    && (new_phytype != WMPHY_UNKNOWN))
   11039 			doubt_phytype = new_phytype;
   11040 		new_phytype = WMPHY_GG82563;
   11041 		new_readreg = wm_gmii_i80003_readreg;
   11042 		new_writereg = wm_gmii_i80003_writereg;
   11043 	} else if (sc->sc_type >= WM_T_I210) {
   11044 		/* I210 and I211 */
   11045 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11046 		    && (new_phytype != WMPHY_I210)
   11047 		    && (new_phytype != WMPHY_UNKNOWN))
   11048 			doubt_phytype = new_phytype;
   11049 		new_phytype = WMPHY_I210;
   11050 		new_readreg = wm_gmii_gs40g_readreg;
   11051 		new_writereg = wm_gmii_gs40g_writereg;
   11052 	} else if (sc->sc_type >= WM_T_82580) {
   11053 		/* 82580, I350 and I354 */
   11054 		new_readreg = wm_gmii_82580_readreg;
   11055 		new_writereg = wm_gmii_82580_writereg;
   11056 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11058 		new_readreg = wm_gmii_i82544_readreg;
   11059 		new_writereg = wm_gmii_i82544_writereg;
   11060 	} else {
   11061 		new_readreg = wm_gmii_i82543_readreg;
   11062 		new_writereg = wm_gmii_i82543_writereg;
   11063 	}
   11064 
   11065 	if (new_phytype == WMPHY_BM) {
   11066 		/* All BM use _bm_ */
   11067 		new_readreg = wm_gmii_bm_readreg;
   11068 		new_writereg = wm_gmii_bm_writereg;
   11069 	}
   11070 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11071 		/* All PCH* use _hv_ */
   11072 		new_readreg = wm_gmii_hv_readreg;
   11073 		new_writereg = wm_gmii_hv_writereg;
   11074 	}
   11075 
   11076 	/* Diag output */
   11077 	if (dodiag) {
   11078 		if (doubt_phytype != WMPHY_UNKNOWN)
   11079 			aprint_error_dev(dev, "Assumed new PHY type was "
   11080 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11081 			    new_phytype);
   11082 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11083 		    && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. New PHY type = %u\n",
   11086 			    sc->sc_phytype, new_phytype);
   11087 
   11088 		if ((mii->mii_readreg != NULL) &&
   11089 		    (new_phytype == WMPHY_UNKNOWN))
   11090 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11091 
   11092 		if ((mii->mii_readreg != NULL) &&
   11093 		    (mii->mii_readreg != new_readreg))
   11094 			aprint_error_dev(dev, "Previously assumed PHY "
   11095 			    "read/write function was incorrect.\n");
   11096 	}
   11097 
   11098 	/* Update now */
   11099 	sc->sc_phytype = new_phytype;
   11100 	mii->mii_readreg = new_readreg;
   11101 	mii->mii_writereg = new_writereg;
   11102 	if (new_readreg == wm_gmii_hv_readreg) {
   11103 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11104 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11105 	} else if (new_readreg == wm_sgmii_readreg) {
   11106 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11107 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11108 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11109 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11110 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11111 	}
   11112 }
   11113 
   11114 /*
   11115  * wm_get_phy_id_82575:
   11116  *
 *	Return the PHY ID, or -1 on failure.
   11118  */
   11119 static int
   11120 wm_get_phy_id_82575(struct wm_softc *sc)
   11121 {
   11122 	uint32_t reg;
   11123 	int phyid = -1;
   11124 
   11125 	/* XXX */
   11126 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11127 		return -1;
   11128 
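	/*
	 * When SGMII runs over the external MDIO interface, the PHY
	 * address in use can be read back from the PHY address field of
	 * MDIC (82575/82576) or MDICNFG (82580 and later).
	 */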
   11129 	if (wm_sgmii_uses_mdio(sc)) {
   11130 		switch (sc->sc_type) {
   11131 		case WM_T_82575:
   11132 		case WM_T_82576:
   11133 			reg = CSR_READ(sc, WMREG_MDIC);
   11134 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11135 			break;
   11136 		case WM_T_82580:
   11137 		case WM_T_I350:
   11138 		case WM_T_I354:
   11139 		case WM_T_I210:
   11140 		case WM_T_I211:
   11141 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11142 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11143 			break;
   11144 		default:
   11145 			return -1;
   11146 		}
   11147 	}
   11148 
   11149 	return phyid;
   11150 }
   11151 
   11152 /*
   11153  * wm_gmii_mediainit:
   11154  *
   11155  *	Initialize media for use on 1000BASE-T devices.
   11156  */
   11157 static void
   11158 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11159 {
   11160 	device_t dev = sc->sc_dev;
   11161 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11162 	struct mii_data *mii = &sc->sc_mii;
   11163 
   11164 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11165 		device_xname(sc->sc_dev), __func__));
   11166 
   11167 	/* We have GMII. */
   11168 	sc->sc_flags |= WM_F_HAS_MII;
   11169 
   11170 	if (sc->sc_type == WM_T_80003)
   11171 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11172 	else
   11173 		sc->sc_tipg = TIPG_1000T_DFLT;
   11174 
   11175 	/*
   11176 	 * Let the chip set speed/duplex on its own based on
   11177 	 * signals from the PHY.
   11178 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11179 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11180 	 */
   11181 	sc->sc_ctrl |= CTRL_SLU;
   11182 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11183 
   11184 	/* Initialize our media structures and probe the GMII. */
   11185 	mii->mii_ifp = ifp;
   11186 
   11187 	mii->mii_statchg = wm_gmii_statchg;
   11188 
	/* Switch PHY control from SMBus to PCIe */
   11190 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11191 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11192 	    || (sc->sc_type == WM_T_PCH_CNP))
   11193 		wm_init_phy_workarounds_pchlan(sc);
   11194 
   11195 	wm_gmii_reset(sc);
   11196 
   11197 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11198 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11199 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11200 
   11201 	/* Setup internal SGMII PHY for SFP */
   11202 	wm_sgmii_sfp_preconfig(sc);
   11203 
   11204 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11205 	    || (sc->sc_type == WM_T_82580)
   11206 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11207 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11208 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11209 			/* Attach only one port */
   11210 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11211 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11212 		} else {
   11213 			int i, id;
   11214 			uint32_t ctrl_ext;
   11215 
   11216 			id = wm_get_phy_id_82575(sc);
   11217 			if (id != -1) {
   11218 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11219 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11220 			}
   11221 			if ((id == -1)
   11222 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11223 				/* Power on sgmii phy if it is disabled */
   11224 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11225 				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   11227 				CSR_WRITE_FLUSH(sc);
   11228 				delay(300*1000); /* XXX too long */
   11229 
   11230 				/*
				 * Scan PHY addresses 1 to 7.
   11232 				 *
   11233 				 * I2C access fails with I2C register's ERROR
   11234 				 * bit set, so prevent error message while
   11235 				 * scanning.
   11236 				 */
   11237 				sc->phy.no_errprint = true;
   11238 				for (i = 1; i < 8; i++)
   11239 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11240 					    0xffffffff, i, MII_OFFSET_ANY,
   11241 					    MIIF_DOPAUSE);
   11242 				sc->phy.no_errprint = false;
   11243 
   11244 				/* Restore previous sfp cage power state */
   11245 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11246 			}
   11247 		}
   11248 	} else
   11249 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11250 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11251 
   11252 	/*
	 * If the MAC is PCH2 or a newer variant and it failed to detect a
	 * MII PHY, call wm_set_mdio_slow_mode_hv() as a workaround and
	 * retry.
   11255 	 */
   11256 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11257 		|| (sc->sc_type == WM_T_PCH_SPT)
   11258 		|| (sc->sc_type == WM_T_PCH_CNP))
   11259 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11260 		wm_set_mdio_slow_mode_hv(sc);
   11261 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11262 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11263 	}
   11264 
   11265 	/*
   11266 	 * (For ICH8 variants)
   11267 	 * If PHY detection failed, use BM's r/w function and retry.
   11268 	 */
   11269 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* If that failed, retry with the *_bm_* functions */
   11271 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11272 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11273 		    sc->sc_phytype);
   11274 		sc->sc_phytype = WMPHY_BM;
   11275 		mii->mii_readreg = wm_gmii_bm_readreg;
   11276 		mii->mii_writereg = wm_gmii_bm_writereg;
   11277 
   11278 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11279 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11280 	}
   11281 
   11282 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   11284 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11285 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11286 		sc->sc_phytype = WMPHY_NONE;
   11287 	} else {
   11288 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11289 
   11290 		/*
   11291 		 * PHY found! Check PHY type again by the second call of
   11292 		 * wm_gmii_setup_phytype.
   11293 		 */
   11294 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11295 		    child->mii_mpd_model);
   11296 
   11297 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11298 	}
   11299 }
   11300 
   11301 /*
   11302  * wm_gmii_mediachange:	[ifmedia interface function]
   11303  *
   11304  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11305  */
   11306 static int
   11307 wm_gmii_mediachange(struct ifnet *ifp)
   11308 {
   11309 	struct wm_softc *sc = ifp->if_softc;
   11310 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11311 	uint32_t reg;
   11312 	int rc;
   11313 
   11314 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11315 		device_xname(sc->sc_dev), __func__));
   11316 
   11317 	KASSERT(WM_CORE_LOCKED(sc));
   11318 
   11319 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11320 		return 0;
   11321 
   11322 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11323 	if ((sc->sc_type == WM_T_82580)
   11324 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11325 	    || (sc->sc_type == WM_T_I211)) {
   11326 		reg = CSR_READ(sc, WMREG_PHPM);
   11327 		reg &= ~PHPM_GO_LINK_D;
   11328 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11329 	}
   11330 
   11331 	/* Disable D0 LPLU. */
   11332 	wm_lplu_d0_disable(sc);
   11333 
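	/*
	 * CTRL_SLU (set link up) must be set for the link to come up.
	 * With IFM_AUTO the MAC follows the speed/duplex resolved by the
	 * PHY; otherwise ASDE is cleared and the speed and duplex are
	 * forced to the selected media below.
	 */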
   11334 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11335 	sc->sc_ctrl |= CTRL_SLU;
   11336 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11337 	    || (sc->sc_type > WM_T_82543)) {
   11338 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11339 	} else {
   11340 		sc->sc_ctrl &= ~CTRL_ASDE;
   11341 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11342 		if (ife->ifm_media & IFM_FDX)
   11343 			sc->sc_ctrl |= CTRL_FD;
   11344 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11345 		case IFM_10_T:
   11346 			sc->sc_ctrl |= CTRL_SPEED_10;
   11347 			break;
   11348 		case IFM_100_TX:
   11349 			sc->sc_ctrl |= CTRL_SPEED_100;
   11350 			break;
   11351 		case IFM_1000_T:
   11352 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11353 			break;
   11354 		case IFM_NONE:
   11355 			/* There is no specific setting for IFM_NONE */
   11356 			break;
   11357 		default:
   11358 			panic("wm_gmii_mediachange: bad media 0x%x",
   11359 			    ife->ifm_media);
   11360 		}
   11361 	}
   11362 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11363 	CSR_WRITE_FLUSH(sc);
   11364 
   11365 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11366 		wm_serdes_mediachange(ifp);
   11367 
   11368 	if (sc->sc_type <= WM_T_82543)
   11369 		wm_gmii_reset(sc);
   11370 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11371 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
   11373 		delay(300 * 1000);
   11374 		wm_gmii_reset(sc);
   11375 	}
   11376 
   11377 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11378 		return 0;
   11379 	return rc;
   11380 }
   11381 
   11382 /*
   11383  * wm_gmii_mediastatus:	[ifmedia interface function]
   11384  *
   11385  *	Get the current interface media status on a 1000BASE-T device.
   11386  */
   11387 static void
   11388 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11389 {
   11390 	struct wm_softc *sc = ifp->if_softc;
   11391 
   11392 	KASSERT(WM_CORE_LOCKED(sc));
   11393 
   11394 	ether_mediastatus(ifp, ifmr);
   11395 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11396 	    | sc->sc_flowflags;
   11397 }
   11398 
   11399 #define	MDI_IO		CTRL_SWDPIN(2)
   11400 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11401 #define	MDI_CLK		CTRL_SWDPIN(3)
   11402 
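/*
 * The 82543 has no MDIC register, so MII management frames are bit-banged
 * through the software-controllable pins defined above. A clause 22
 * management frame (IEEE 802.3) is a 32-bit preamble of all ones, a 2-bit
 * start delimiter, a 2-bit opcode, a 5-bit PHY address, a 5-bit register
 * address, a 2-bit turnaround and 16 data bits; that is why the
 * readreg/writereg functions below first send 32 one-bits and then the
 * command word.
 */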
   11403 static void
   11404 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11405 {
   11406 	uint32_t i, v;
   11407 
   11408 	v = CSR_READ(sc, WMREG_CTRL);
   11409 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11410 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11411 
   11412 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11413 		if (data & i)
   11414 			v |= MDI_IO;
   11415 		else
   11416 			v &= ~MDI_IO;
   11417 		CSR_WRITE(sc, WMREG_CTRL, v);
   11418 		CSR_WRITE_FLUSH(sc);
   11419 		delay(10);
   11420 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11421 		CSR_WRITE_FLUSH(sc);
   11422 		delay(10);
   11423 		CSR_WRITE(sc, WMREG_CTRL, v);
   11424 		CSR_WRITE_FLUSH(sc);
   11425 		delay(10);
   11426 	}
   11427 }
   11428 
   11429 static uint16_t
   11430 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11431 {
   11432 	uint32_t v, i;
   11433 	uint16_t data = 0;
   11434 
   11435 	v = CSR_READ(sc, WMREG_CTRL);
   11436 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11437 	v |= CTRL_SWDPIO(3);
   11438 
   11439 	CSR_WRITE(sc, WMREG_CTRL, v);
   11440 	CSR_WRITE_FLUSH(sc);
   11441 	delay(10);
   11442 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11443 	CSR_WRITE_FLUSH(sc);
   11444 	delay(10);
   11445 	CSR_WRITE(sc, WMREG_CTRL, v);
   11446 	CSR_WRITE_FLUSH(sc);
   11447 	delay(10);
   11448 
   11449 	for (i = 0; i < 16; i++) {
   11450 		data <<= 1;
   11451 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11452 		CSR_WRITE_FLUSH(sc);
   11453 		delay(10);
   11454 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11455 			data |= 1;
   11456 		CSR_WRITE(sc, WMREG_CTRL, v);
   11457 		CSR_WRITE_FLUSH(sc);
   11458 		delay(10);
   11459 	}
   11460 
   11461 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11462 	CSR_WRITE_FLUSH(sc);
   11463 	delay(10);
   11464 	CSR_WRITE(sc, WMREG_CTRL, v);
   11465 	CSR_WRITE_FLUSH(sc);
   11466 	delay(10);
   11467 
   11468 	return data;
   11469 }
   11470 
   11471 #undef MDI_IO
   11472 #undef MDI_DIR
   11473 #undef MDI_CLK
   11474 
   11475 /*
   11476  * wm_gmii_i82543_readreg:	[mii interface function]
   11477  *
   11478  *	Read a PHY register on the GMII (i82543 version).
   11479  */
   11480 static int
   11481 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11482 {
   11483 	struct wm_softc *sc = device_private(dev);
   11484 
   11485 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11486 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11487 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11488 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11489 
   11490 	DPRINTF(sc, WM_DEBUG_GMII,
   11491 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11492 		device_xname(dev), phy, reg, *val));
   11493 
   11494 	return 0;
   11495 }
   11496 
   11497 /*
   11498  * wm_gmii_i82543_writereg:	[mii interface function]
   11499  *
   11500  *	Write a PHY register on the GMII (i82543 version).
   11501  */
   11502 static int
   11503 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11504 {
   11505 	struct wm_softc *sc = device_private(dev);
   11506 
   11507 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11508 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11509 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11510 	    (MII_COMMAND_START << 30), 32);
   11511 
   11512 	return 0;
   11513 }
   11514 
   11515 /*
   11516  * wm_gmii_mdic_readreg:	[mii interface function]
   11517  *
   11518  *	Read a PHY register on the GMII.
   11519  */
   11520 static int
   11521 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11522 {
   11523 	struct wm_softc *sc = device_private(dev);
   11524 	uint32_t mdic = 0;
   11525 	int i;
   11526 
   11527 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11528 	    && (reg > MII_ADDRMASK)) {
   11529 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11530 		    __func__, sc->sc_phytype, reg);
   11531 		reg &= MII_ADDRMASK;
   11532 	}
   11533 
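	/*
	 * MDIC layout (per the 8254x family documentation): data in bits
	 * 15:0, the register address in bits 20:16, the PHY address in
	 * bits 25:21 and the opcode in bits 27:26. The hardware sets
	 * MDIC_READY when the transaction completes and MDIC_E on error.
	 */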
   11534 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11535 	    MDIC_REGADD(reg));
   11536 
   11537 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11538 		delay(50);
   11539 		mdic = CSR_READ(sc, WMREG_MDIC);
   11540 		if (mdic & MDIC_READY)
   11541 			break;
   11542 	}
   11543 
   11544 	if ((mdic & MDIC_READY) == 0) {
   11545 		DPRINTF(sc, WM_DEBUG_GMII,
   11546 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11547 			device_xname(dev), phy, reg));
   11548 		return ETIMEDOUT;
   11549 	} else if (mdic & MDIC_E) {
   11550 		/* This is normal if no PHY is present. */
   11551 		DPRINTF(sc, WM_DEBUG_GMII,
   11552 		    ("%s: MDIC read error: phy %d reg %d\n",
   11553 			device_xname(sc->sc_dev), phy, reg));
   11554 		return -1;
   11555 	} else
   11556 		*val = MDIC_DATA(mdic);
   11557 
   11558 	/*
   11559 	 * Allow some time after each MDIC transaction to avoid
   11560 	 * reading duplicate data in the next MDIC transaction.
   11561 	 */
   11562 	if (sc->sc_type == WM_T_PCH2)
   11563 		delay(100);
   11564 
   11565 	return 0;
   11566 }
   11567 
   11568 /*
   11569  * wm_gmii_mdic_writereg:	[mii interface function]
   11570  *
   11571  *	Write a PHY register on the GMII.
   11572  */
   11573 static int
   11574 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11575 {
   11576 	struct wm_softc *sc = device_private(dev);
   11577 	uint32_t mdic = 0;
   11578 	int i;
   11579 
   11580 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11581 	    && (reg > MII_ADDRMASK)) {
   11582 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11583 		    __func__, sc->sc_phytype, reg);
   11584 		reg &= MII_ADDRMASK;
   11585 	}
   11586 
   11587 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11588 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11589 
   11590 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11591 		delay(50);
   11592 		mdic = CSR_READ(sc, WMREG_MDIC);
   11593 		if (mdic & MDIC_READY)
   11594 			break;
   11595 	}
   11596 
   11597 	if ((mdic & MDIC_READY) == 0) {
   11598 		DPRINTF(sc, WM_DEBUG_GMII,
   11599 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11600 			device_xname(dev), phy, reg));
   11601 		return ETIMEDOUT;
   11602 	} else if (mdic & MDIC_E) {
   11603 		DPRINTF(sc, WM_DEBUG_GMII,
   11604 		    ("%s: MDIC write error: phy %d reg %d\n",
   11605 			device_xname(dev), phy, reg));
   11606 		return -1;
   11607 	}
   11608 
   11609 	/*
   11610 	 * Allow some time after each MDIC transaction to avoid
   11611 	 * reading duplicate data in the next MDIC transaction.
   11612 	 */
   11613 	if (sc->sc_type == WM_T_PCH2)
   11614 		delay(100);
   11615 
   11616 	return 0;
   11617 }
   11618 
   11619 /*
   11620  * wm_gmii_i82544_readreg:	[mii interface function]
   11621  *
   11622  *	Read a PHY register on the GMII.
   11623  */
   11624 static int
   11625 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11626 {
   11627 	struct wm_softc *sc = device_private(dev);
   11628 	int rv;
   11629 
   11630 	rv = sc->phy.acquire(sc);
   11631 	if (rv != 0) {
   11632 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11633 		return rv;
   11634 	}
   11635 
   11636 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11637 
   11638 	sc->phy.release(sc);
   11639 
   11640 	return rv;
   11641 }
   11642 
   11643 static int
   11644 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11645 {
   11646 	struct wm_softc *sc = device_private(dev);
   11647 	int rv;
   11648 
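	/*
	 * Register numbers above 0x1f carry a page number in their upper
	 * bits; IGP-family PHYs want that page written to their page
	 * select register before the 5-bit register offset is accessed.
	 */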
   11649 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11650 		switch (sc->sc_phytype) {
   11651 		case WMPHY_IGP:
   11652 		case WMPHY_IGP_2:
   11653 		case WMPHY_IGP_3:
   11654 			rv = wm_gmii_mdic_writereg(dev, phy,
   11655 			    IGPHY_PAGE_SELECT, reg);
   11656 			if (rv != 0)
   11657 				return rv;
   11658 			break;
   11659 		default:
   11660 #ifdef WM_DEBUG
   11661 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11662 			    __func__, sc->sc_phytype, reg);
   11663 #endif
   11664 			break;
   11665 		}
   11666 	}
   11667 
   11668 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11669 }
   11670 
   11671 /*
   11672  * wm_gmii_i82544_writereg:	[mii interface function]
   11673  *
   11674  *	Write a PHY register on the GMII.
   11675  */
   11676 static int
   11677 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11678 {
   11679 	struct wm_softc *sc = device_private(dev);
   11680 	int rv;
   11681 
   11682 	rv = sc->phy.acquire(sc);
   11683 	if (rv != 0) {
   11684 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11685 		return rv;
   11686 	}
   11687 
   11688 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11689 	sc->phy.release(sc);
   11690 
   11691 	return rv;
   11692 }
   11693 
   11694 static int
   11695 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11696 {
   11697 	struct wm_softc *sc = device_private(dev);
   11698 	int rv;
   11699 
   11700 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11701 		switch (sc->sc_phytype) {
   11702 		case WMPHY_IGP:
   11703 		case WMPHY_IGP_2:
   11704 		case WMPHY_IGP_3:
   11705 			rv = wm_gmii_mdic_writereg(dev, phy,
   11706 			    IGPHY_PAGE_SELECT, reg);
   11707 			if (rv != 0)
   11708 				return rv;
   11709 			break;
   11710 		default:
   11711 #ifdef WM_DEBUG
   11712 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11713 			    __func__, sc->sc_phytype, reg);
   11714 #endif
   11715 			break;
   11716 		}
   11717 	}
   11718 
   11719 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11720 }
   11721 
   11722 /*
   11723  * wm_gmii_i80003_readreg:	[mii interface function]
   11724  *
 *	Read a PHY register on the Kumeran bus (80003).
   11726  * This could be handled by the PHY layer if we didn't have to lock the
   11727  * resource ...
   11728  */
   11729 static int
   11730 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11731 {
   11732 	struct wm_softc *sc = device_private(dev);
   11733 	int page_select;
   11734 	uint16_t temp, temp2;
   11735 	int rv;
   11736 
   11737 	if (phy != 1) /* Only one PHY on kumeran bus */
   11738 		return -1;
   11739 
   11740 	rv = sc->phy.acquire(sc);
   11741 	if (rv != 0) {
   11742 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11743 		return rv;
   11744 	}
   11745 
   11746 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11747 		page_select = GG82563_PHY_PAGE_SELECT;
   11748 	else {
   11749 		/*
   11750 		 * Use Alternative Page Select register to access registers
   11751 		 * 30 and 31.
   11752 		 */
   11753 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11754 	}
   11755 	temp = reg >> GG82563_PAGE_SHIFT;
   11756 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11757 		goto out;
   11758 
   11759 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11760 		/*
		 * Wait an extra 200us to work around a bug in the MDIC
		 * register's ready bit.
   11763 		 */
   11764 		delay(200);
   11765 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11766 		if ((rv != 0) || (temp2 != temp)) {
   11767 			device_printf(dev, "%s failed\n", __func__);
   11768 			rv = -1;
   11769 			goto out;
   11770 		}
   11771 		delay(200);
   11772 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11773 		delay(200);
   11774 	} else
   11775 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11776 
   11777 out:
   11778 	sc->phy.release(sc);
   11779 	return rv;
   11780 }
   11781 
   11782 /*
   11783  * wm_gmii_i80003_writereg:	[mii interface function]
   11784  *
 *	Write a PHY register on the Kumeran bus (80003).
   11786  * This could be handled by the PHY layer if we didn't have to lock the
   11787  * resource ...
   11788  */
   11789 static int
   11790 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11791 {
   11792 	struct wm_softc *sc = device_private(dev);
   11793 	int page_select, rv;
   11794 	uint16_t temp, temp2;
   11795 
   11796 	if (phy != 1) /* Only one PHY on kumeran bus */
   11797 		return -1;
   11798 
   11799 	rv = sc->phy.acquire(sc);
   11800 	if (rv != 0) {
   11801 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11802 		return rv;
   11803 	}
   11804 
   11805 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11806 		page_select = GG82563_PHY_PAGE_SELECT;
   11807 	else {
   11808 		/*
   11809 		 * Use Alternative Page Select register to access registers
   11810 		 * 30 and 31.
   11811 		 */
   11812 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11813 	}
   11814 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11815 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11816 		goto out;
   11817 
   11818 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11819 		/*
		 * Wait an extra 200us to work around a bug in the MDIC
		 * register's ready bit.
   11822 		 */
   11823 		delay(200);
   11824 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11825 		if ((rv != 0) || (temp2 != temp)) {
   11826 			device_printf(dev, "%s failed\n", __func__);
   11827 			rv = -1;
   11828 			goto out;
   11829 		}
   11830 		delay(200);
   11831 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11832 		delay(200);
   11833 	} else
   11834 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11835 
   11836 out:
   11837 	sc->phy.release(sc);
   11838 	return rv;
   11839 }
   11840 
   11841 /*
   11842  * wm_gmii_bm_readreg:	[mii interface function]
   11843  *
 *	Read a PHY register on a BM PHY.
   11845  * This could be handled by the PHY layer if we didn't have to lock the
   11846  * resource ...
   11847  */
   11848 static int
   11849 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11850 {
   11851 	struct wm_softc *sc = device_private(dev);
   11852 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11853 	int rv;
   11854 
   11855 	rv = sc->phy.acquire(sc);
   11856 	if (rv != 0) {
   11857 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11858 		return rv;
   11859 	}
   11860 
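	/*
	 * On BM PHYs, the page select, port control and wakeup registers
	 * answer at PHY address 1 regardless of the address probed (see
	 * wm_enable_phy_wakeup_reg_access_bm() below), hence the address
	 * override here.
	 */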
   11861 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11862 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11863 		    || (reg == 31)) ? 1 : phy;
   11864 	/* Page 800 works differently than the rest so it has its own func */
   11865 	if (page == BM_WUC_PAGE) {
   11866 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11867 		goto release;
   11868 	}
   11869 
   11870 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11871 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11872 		    && (sc->sc_type != WM_T_82583))
   11873 			rv = wm_gmii_mdic_writereg(dev, phy,
   11874 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11875 		else
   11876 			rv = wm_gmii_mdic_writereg(dev, phy,
   11877 			    BME1000_PHY_PAGE_SELECT, page);
   11878 		if (rv != 0)
   11879 			goto release;
   11880 	}
   11881 
   11882 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11883 
   11884 release:
   11885 	sc->phy.release(sc);
   11886 	return rv;
   11887 }
   11888 
   11889 /*
   11890  * wm_gmii_bm_writereg:	[mii interface function]
   11891  *
 *	Write a PHY register on a BM PHY.
   11893  * This could be handled by the PHY layer if we didn't have to lock the
   11894  * resource ...
   11895  */
   11896 static int
   11897 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11898 {
   11899 	struct wm_softc *sc = device_private(dev);
   11900 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11901 	int rv;
   11902 
   11903 	rv = sc->phy.acquire(sc);
   11904 	if (rv != 0) {
   11905 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11906 		return rv;
   11907 	}
   11908 
   11909 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11910 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11911 		    || (reg == 31)) ? 1 : phy;
   11912 	/* Page 800 works differently than the rest so it has its own func */
   11913 	if (page == BM_WUC_PAGE) {
   11914 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11915 		goto release;
   11916 	}
   11917 
   11918 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11919 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11920 		    && (sc->sc_type != WM_T_82583))
   11921 			rv = wm_gmii_mdic_writereg(dev, phy,
   11922 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11923 		else
   11924 			rv = wm_gmii_mdic_writereg(dev, phy,
   11925 			    BME1000_PHY_PAGE_SELECT, page);
   11926 		if (rv != 0)
   11927 			goto release;
   11928 	}
   11929 
   11930 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11931 
   11932 release:
   11933 	sc->phy.release(sc);
   11934 	return rv;
   11935 }
   11936 
   11937 /*
   11938  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11939  *  @dev: pointer to the HW structure
   11940  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11941  *
   11942  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11943  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11944  */
   11945 static int
   11946 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11947 {
   11948 #ifdef WM_DEBUG
   11949 	struct wm_softc *sc = device_private(dev);
   11950 #endif
   11951 	uint16_t temp;
   11952 	int rv;
   11953 
   11954 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11955 		device_xname(dev), __func__));
   11956 
   11957 	if (!phy_regp)
   11958 		return -1;
   11959 
   11960 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11961 
   11962 	/* Select Port Control Registers page */
   11963 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11964 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11965 	if (rv != 0)
   11966 		return rv;
   11967 
   11968 	/* Read WUCE and save it */
   11969 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11970 	if (rv != 0)
   11971 		return rv;
   11972 
   11973 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11974 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11975 	 */
   11976 	temp = *phy_regp;
   11977 	temp |= BM_WUC_ENABLE_BIT;
   11978 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11979 
   11980 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11981 		return rv;
   11982 
   11983 	/* Select Host Wakeup Registers page - caller now able to write
   11984 	 * registers on the Wakeup registers page
   11985 	 */
   11986 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11987 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11988 }
   11989 
   11990 /*
   11991  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11992  *  @dev: pointer to the HW structure
   11993  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11994  *
   11995  *  Restore BM_WUC_ENABLE_REG to its original value.
   11996  *
   11997  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11998  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11999  *  caller.
   12000  */
   12001 static int
   12002 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12003 {
   12004 #ifdef WM_DEBUG
   12005 	struct wm_softc *sc = device_private(dev);
   12006 #endif
   12007 
   12008 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12009 		device_xname(dev), __func__));
   12010 
   12011 	if (!phy_regp)
   12012 		return -1;
   12013 
   12014 	/* Select Port Control Registers page */
   12015 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12016 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12017 
   12018 	/* Restore 769.17 to its original value */
   12019 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12020 
   12021 	return 0;
   12022 }
   12023 
   12024 /*
   12025  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   12026  *  @sc: pointer to the HW structure
   12027  *  @offset: register offset to be read or written
   12028  *  @val: pointer to the data to read or write
   12029  *  @rd: determines if operation is read or write
   12030  *  @page_set: BM_WUC_PAGE already set and access enabled
   12031  *
   12032  *  Read the PHY register at offset and store the retrieved information in
   12033  *  data, or write data to PHY register at offset.  Note the procedure to
   12034  *  access the PHY wakeup registers is different than reading the other PHY
   12035  *  registers. It works as such:
   12036  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set the page to 800 for host access (801 for manageability)
   12038  *  3) Write the address using the address opcode (0x11)
   12039  *  4) Read or write the data using the data opcode (0x12)
   12040  *  5) Restore 769.17.2 to its original value
   12041  *
   12042  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12043  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12044  *
   12045  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12046  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   12048  */
   12049 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12051 	bool page_set)
   12052 {
   12053 	struct wm_softc *sc = device_private(dev);
   12054 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12055 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12056 	uint16_t wuce;
   12057 	int rv = 0;
   12058 
   12059 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12060 		device_xname(dev), __func__));
   12061 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12062 	if ((sc->sc_type == WM_T_PCH)
   12063 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12064 		device_printf(dev,
   12065 		    "Attempting to access page %d while gig enabled.\n", page);
   12066 	}
   12067 
   12068 	if (!page_set) {
   12069 		/* Enable access to PHY wakeup registers */
   12070 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12071 		if (rv != 0) {
   12072 			device_printf(dev,
   12073 			    "%s: Could not enable PHY wakeup reg access\n",
   12074 			    __func__);
   12075 			return rv;
   12076 		}
   12077 	}
   12078 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12079 		device_xname(sc->sc_dev), __func__, page, regnum));
   12080 
   12081 	/*
	 * 3) and 4) Access the PHY wakeup register: write the register
	 * number with the address opcode, then read or write the data
	 * with the data opcode.
   12084 	 */
   12085 
   12086 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12087 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12088 	if (rv != 0)
   12089 		return rv;
   12090 
   12091 	if (rd) {
   12092 		/* Read the Wakeup register page value using opcode 0x12 */
   12093 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12094 	} else {
   12095 		/* Write the Wakeup register page value using opcode 0x12 */
   12096 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12097 	}
   12098 	if (rv != 0)
   12099 		return rv;
   12100 
   12101 	if (!page_set)
   12102 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12103 
   12104 	return rv;
   12105 }
   12106 
   12107 /*
   12108  * wm_gmii_hv_readreg:	[mii interface function]
   12109  *
 *	Read a PHY register on an HV (PCH-family) PHY.
   12111  * This could be handled by the PHY layer if we didn't have to lock the
   12112  * resource ...
   12113  */
   12114 static int
   12115 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12116 {
   12117 	struct wm_softc *sc = device_private(dev);
   12118 	int rv;
   12119 
   12120 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12121 		device_xname(dev), __func__));
   12122 
   12123 	rv = sc->phy.acquire(sc);
   12124 	if (rv != 0) {
   12125 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12126 		return rv;
   12127 	}
   12128 
   12129 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12130 	sc->phy.release(sc);
   12131 	return rv;
   12132 }
   12133 
   12134 static int
   12135 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12136 {
   12137 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12138 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12139 	int rv;
   12140 
   12141 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12142 
   12143 	/* Page 800 works differently than the rest so it has its own func */
   12144 	if (page == BM_WUC_PAGE)
   12145 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12146 
   12147 	/*
	 * Pages between 1 and 767 work differently than the rest and are
	 * not handled here.
   12150 	 */
   12151 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
   12153 		return -1;
   12154 	}
   12155 
   12156 	/*
   12157 	 * XXX I21[789] documents say that the SMBus Address register is at
   12158 	 * PHY address 01, Page 0 (not 768), Register 26.
   12159 	 */
   12160 	if (page == HV_INTC_FC_PAGE_START)
   12161 		page = 0;
   12162 
   12163 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12164 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12165 		    page << BME1000_PAGE_SHIFT);
   12166 		if (rv != 0)
   12167 			return rv;
   12168 	}
   12169 
   12170 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12171 }
   12172 
   12173 /*
   12174  * wm_gmii_hv_writereg:	[mii interface function]
   12175  *
 *	Write a PHY register on an HV (PCH-family) PHY.
   12177  * This could be handled by the PHY layer if we didn't have to lock the
   12178  * resource ...
   12179  */
   12180 static int
   12181 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12182 {
   12183 	struct wm_softc *sc = device_private(dev);
   12184 	int rv;
   12185 
   12186 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12187 		device_xname(dev), __func__));
   12188 
   12189 	rv = sc->phy.acquire(sc);
   12190 	if (rv != 0) {
   12191 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12192 		return rv;
   12193 	}
   12194 
   12195 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12196 	sc->phy.release(sc);
   12197 
   12198 	return rv;
   12199 }
   12200 
   12201 static int
   12202 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12203 {
   12204 	struct wm_softc *sc = device_private(dev);
   12205 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12206 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12207 	int rv;
   12208 
   12209 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12210 
   12211 	/* Page 800 works differently than the rest so it has its own func */
   12212 	if (page == BM_WUC_PAGE)
   12213 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12214 		    false);
   12215 
   12216 	/*
	 * Pages between 1 and 767 work differently than the rest and are
	 * not handled here.
   12219 	 */
   12220 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
   12222 		return -1;
   12223 	}
   12224 
   12225 	{
   12226 		/*
   12227 		 * XXX I21[789] documents say that the SMBus Address register
   12228 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12229 		 */
   12230 		if (page == HV_INTC_FC_PAGE_START)
   12231 			page = 0;
   12232 
   12233 		/*
   12234 		 * XXX Workaround MDIO accesses being disabled after entering
   12235 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12236 		 * register is set)
   12237 		 */
   12238 		if (sc->sc_phytype == WMPHY_82578) {
   12239 			struct mii_softc *child;
   12240 
   12241 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12242 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12243 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12244 			    && ((val & (1 << 11)) != 0)) {
   12245 				device_printf(dev, "XXX need workaround\n");
   12246 			}
   12247 		}
   12248 
   12249 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12250 			rv = wm_gmii_mdic_writereg(dev, 1,
   12251 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12252 			if (rv != 0)
   12253 				return rv;
   12254 		}
   12255 	}
   12256 
   12257 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12258 }
   12259 
   12260 /*
   12261  * wm_gmii_82580_readreg:	[mii interface function]
   12262  *
   12263  *	Read a PHY register on the 82580 and I350.
   12264  * This could be handled by the PHY layer if we didn't have to lock the
   12265  * resource ...
   12266  */
   12267 static int
   12268 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12269 {
   12270 	struct wm_softc *sc = device_private(dev);
   12271 	int rv;
   12272 
   12273 	rv = sc->phy.acquire(sc);
   12274 	if (rv != 0) {
   12275 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12276 		return rv;
   12277 	}
   12278 
   12279 #ifdef DIAGNOSTIC
   12280 	if (reg > MII_ADDRMASK) {
   12281 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12282 		    __func__, sc->sc_phytype, reg);
   12283 		reg &= MII_ADDRMASK;
   12284 	}
   12285 #endif
   12286 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12287 
   12288 	sc->phy.release(sc);
   12289 	return rv;
   12290 }
   12291 
   12292 /*
   12293  * wm_gmii_82580_writereg:	[mii interface function]
   12294  *
   12295  *	Write a PHY register on the 82580 and I350.
   12296  * This could be handled by the PHY layer if we didn't have to lock the
   12297  * resource ...
   12298  */
   12299 static int
   12300 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12301 {
   12302 	struct wm_softc *sc = device_private(dev);
   12303 	int rv;
   12304 
   12305 	rv = sc->phy.acquire(sc);
   12306 	if (rv != 0) {
   12307 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12308 		return rv;
   12309 	}
   12310 
   12311 #ifdef DIAGNOSTIC
   12312 	if (reg > MII_ADDRMASK) {
   12313 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12314 		    __func__, sc->sc_phytype, reg);
   12315 		reg &= MII_ADDRMASK;
   12316 	}
   12317 #endif
   12318 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12319 
   12320 	sc->phy.release(sc);
   12321 	return rv;
   12322 }
   12323 
   12324 /*
   12325  * wm_gmii_gs40g_readreg:	[mii interface function]
   12326  *
 *	Read a PHY register on the I210 and I211.
   12328  * This could be handled by the PHY layer if we didn't have to lock the
   12329  * resource ...
   12330  */
   12331 static int
   12332 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12333 {
   12334 	struct wm_softc *sc = device_private(dev);
   12335 	int page, offset;
   12336 	int rv;
   12337 
   12338 	/* Acquire semaphore */
   12339 	rv = sc->phy.acquire(sc);
   12340 	if (rv != 0) {
   12341 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12342 		return rv;
   12343 	}
   12344 
   12345 	/* Page select */
   12346 	page = reg >> GS40G_PAGE_SHIFT;
   12347 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12348 	if (rv != 0)
   12349 		goto release;
   12350 
   12351 	/* Read reg */
   12352 	offset = reg & GS40G_OFFSET_MASK;
   12353 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12354 
   12355 release:
   12356 	sc->phy.release(sc);
   12357 	return rv;
   12358 }
   12359 
   12360 /*
   12361  * wm_gmii_gs40g_writereg:	[mii interface function]
   12362  *
   12363  *	Write a PHY register on the I210 and I211.
   12364  * This could be handled by the PHY layer if we didn't have to lock the
   12365  * resource ...
   12366  */
   12367 static int
   12368 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12369 {
   12370 	struct wm_softc *sc = device_private(dev);
   12371 	uint16_t page;
   12372 	int offset, rv;
   12373 
   12374 	/* Acquire semaphore */
   12375 	rv = sc->phy.acquire(sc);
   12376 	if (rv != 0) {
   12377 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12378 		return rv;
   12379 	}
   12380 
   12381 	/* Page select */
   12382 	page = reg >> GS40G_PAGE_SHIFT;
   12383 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12384 	if (rv != 0)
   12385 		goto release;
   12386 
   12387 	/* Write reg */
   12388 	offset = reg & GS40G_OFFSET_MASK;
   12389 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12390 
   12391 release:
   12392 	/* Release semaphore */
   12393 	sc->phy.release(sc);
   12394 	return rv;
   12395 }
   12396 
   12397 /*
   12398  * wm_gmii_statchg:	[mii interface function]
   12399  *
   12400  *	Callback from MII layer when media changes.
   12401  */
   12402 static void
   12403 wm_gmii_statchg(struct ifnet *ifp)
   12404 {
   12405 	struct wm_softc *sc = ifp->if_softc;
   12406 	struct mii_data *mii = &sc->sc_mii;
   12407 
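	/*
	 * Start from a clean slate: CTRL_TFCE enables sending PAUSE
	 * frames, CTRL_RFCE enables honouring received ones and
	 * FCRTL_XONE enables XON frame transmission. They are re-set
	 * below according to the negotiated flow control flags.
	 */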
   12408 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12409 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12410 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12411 
   12412 	/* Get flow control negotiation result. */
   12413 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12414 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12415 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12416 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12417 	}
   12418 
   12419 	if (sc->sc_flowflags & IFM_FLOW) {
   12420 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12421 			sc->sc_ctrl |= CTRL_TFCE;
   12422 			sc->sc_fcrtl |= FCRTL_XONE;
   12423 		}
   12424 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12425 			sc->sc_ctrl |= CTRL_RFCE;
   12426 	}
   12427 
   12428 	if (mii->mii_media_active & IFM_FDX) {
   12429 		DPRINTF(sc, WM_DEBUG_LINK,
   12430 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12431 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12432 	} else {
   12433 		DPRINTF(sc, WM_DEBUG_LINK,
   12434 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12435 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12436 	}
   12437 
   12438 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12439 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12440 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   12441 						 : WMREG_FCRTL, sc->sc_fcrtl);
   12442 	if (sc->sc_type == WM_T_80003) {
   12443 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12444 		case IFM_1000_T:
   12445 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12446 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   12447 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   12448 			break;
   12449 		default:
   12450 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12451 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   12452 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   12453 			break;
   12454 		}
   12455 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12456 	}
   12457 }
   12458 
/* Kumeran related (80003, ICH* and PCH*) */
   12460 
   12461 /*
   12462  * wm_kmrn_readreg:
   12463  *
   12464  *	Read a kumeran register
   12465  */
   12466 static int
   12467 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12468 {
   12469 	int rv;
   12470 
   12471 	if (sc->sc_type == WM_T_80003)
   12472 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12473 	else
   12474 		rv = sc->phy.acquire(sc);
   12475 	if (rv != 0) {
   12476 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12477 		    __func__);
   12478 		return rv;
   12479 	}
   12480 
   12481 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12482 
   12483 	if (sc->sc_type == WM_T_80003)
   12484 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12485 	else
   12486 		sc->phy.release(sc);
   12487 
   12488 	return rv;
   12489 }
   12490 
   12491 static int
   12492 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12493 {
   12494 
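	/*
	 * Kumeran registers are reached indirectly through KUMCTRLSTA:
	 * the register offset goes into the OFFSET field, KUMCTRLSTA_REN
	 * requests a read, and the 16-bit result appears in the low half
	 * of the register after a short delay.
	 */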
   12495 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12496 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12497 	    KUMCTRLSTA_REN);
   12498 	CSR_WRITE_FLUSH(sc);
   12499 	delay(2);
   12500 
   12501 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12502 
   12503 	return 0;
   12504 }
   12505 
   12506 /*
   12507  * wm_kmrn_writereg:
   12508  *
   12509  *	Write a kumeran register
   12510  */
   12511 static int
   12512 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12513 {
   12514 	int rv;
   12515 
   12516 	if (sc->sc_type == WM_T_80003)
   12517 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12518 	else
   12519 		rv = sc->phy.acquire(sc);
   12520 	if (rv != 0) {
   12521 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12522 		    __func__);
   12523 		return rv;
   12524 	}
   12525 
   12526 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12527 
   12528 	if (sc->sc_type == WM_T_80003)
   12529 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12530 	else
   12531 		sc->phy.release(sc);
   12532 
   12533 	return rv;
   12534 }
   12535 
   12536 static int
   12537 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12538 {
   12539 
   12540 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12541 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12542 
   12543 	return 0;
   12544 }
   12545 
   12546 /*
   12547  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12548  * This access method is different from IEEE MMD.
   12549  */
   12550 static int
   12551 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12552 {
   12553 	struct wm_softc *sc = device_private(dev);
   12554 	int rv;
   12555 
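	/*
	 * Select the target register through I82579_EMI_ADDR, then move
	 * the data through I82579_EMI_DATA.
	 */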
   12556 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12557 	if (rv != 0)
   12558 		return rv;
   12559 
   12560 	if (rd)
   12561 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12562 	else
   12563 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12564 	return rv;
   12565 }
   12566 
   12567 static int
   12568 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12569 {
   12570 
   12571 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12572 }
   12573 
   12574 static int
   12575 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12576 {
   12577 
   12578 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12579 }
   12580 
   12581 /* SGMII related */
   12582 
   12583 /*
   12584  * wm_sgmii_uses_mdio
   12585  *
   12586  * Check whether the transaction is to the internal PHY or the external
   12587  * MDIO interface. Return true if it's MDIO.
   12588  */
   12589 static bool
   12590 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12591 {
   12592 	uint32_t reg;
   12593 	bool ismdio = false;
   12594 
   12595 	switch (sc->sc_type) {
   12596 	case WM_T_82575:
   12597 	case WM_T_82576:
   12598 		reg = CSR_READ(sc, WMREG_MDIC);
   12599 		ismdio = ((reg & MDIC_DEST) != 0);
   12600 		break;
   12601 	case WM_T_82580:
   12602 	case WM_T_I350:
   12603 	case WM_T_I354:
   12604 	case WM_T_I210:
   12605 	case WM_T_I211:
   12606 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12607 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12608 		break;
   12609 	default:
   12610 		break;
   12611 	}
   12612 
   12613 	return ismdio;
   12614 }
   12615 
   12616 /* Setup internal SGMII PHY for SFP */
   12617 static void
   12618 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12619 {
   12620 	uint16_t id1, id2, phyreg;
   12621 	int i, rv;
   12622 
   12623 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12624 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12625 		return;
   12626 
   12627 	for (i = 0; i < MII_NPHY; i++) {
   12628 		sc->phy.no_errprint = true;
   12629 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12630 		if (rv != 0)
   12631 			continue;
   12632 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12633 		if (rv != 0)
   12634 			continue;
   12635 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12636 			continue;
   12637 		sc->phy.no_errprint = false;
   12638 
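		/*
		 * Put the Marvell PHY into SGMII-to-copper mode
		 * (without clock).
		 */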
   12639 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12640 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12641 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12642 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12643 		break;
   12644 	}
   12646 }
   12647 
   12648 /*
   12649  * wm_sgmii_readreg:	[mii interface function]
   12650  *
    12651  *	Read a PHY register on the SGMII.
   12652  * This could be handled by the PHY layer if we didn't have to lock the
   12653  * resource ...
   12654  */
   12655 static int
   12656 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12657 {
   12658 	struct wm_softc *sc = device_private(dev);
   12659 	int rv;
   12660 
   12661 	rv = sc->phy.acquire(sc);
   12662 	if (rv != 0) {
   12663 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12664 		return rv;
   12665 	}
   12666 
   12667 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12668 
   12669 	sc->phy.release(sc);
   12670 	return rv;
   12671 }
   12672 
   12673 static int
   12674 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12675 {
   12676 	struct wm_softc *sc = device_private(dev);
   12677 	uint32_t i2ccmd;
   12678 	int i, rv = 0;
   12679 
   12680 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12681 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12682 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12683 
   12684 	/* Poll the ready bit */
   12685 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12686 		delay(50);
   12687 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12688 		if (i2ccmd & I2CCMD_READY)
   12689 			break;
   12690 	}
   12691 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12692 		device_printf(dev, "I2CCMD Read did not complete\n");
   12693 		rv = ETIMEDOUT;
   12694 	}
   12695 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12696 		if (!sc->phy.no_errprint)
   12697 			device_printf(dev, "I2CCMD Error bit set\n");
   12698 		rv = EIO;
   12699 	}
   12700 
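	/* Swap the data bytes back from I2C (big-endian) order. */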
   12701 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12702 
   12703 	return rv;
   12704 }
   12705 
   12706 /*
   12707  * wm_sgmii_writereg:	[mii interface function]
   12708  *
   12709  *	Write a PHY register on the SGMII.
   12710  * This could be handled by the PHY layer if we didn't have to lock the
   12711  * resource ...
   12712  */
   12713 static int
   12714 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12715 {
   12716 	struct wm_softc *sc = device_private(dev);
   12717 	int rv;
   12718 
   12719 	rv = sc->phy.acquire(sc);
   12720 	if (rv != 0) {
   12721 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12722 		return rv;
   12723 	}
   12724 
   12725 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12726 
   12727 	sc->phy.release(sc);
   12728 
   12729 	return rv;
   12730 }
   12731 
   12732 static int
   12733 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12734 {
   12735 	struct wm_softc *sc = device_private(dev);
   12736 	uint32_t i2ccmd;
   12737 	uint16_t swapdata;
   12738 	int rv = 0;
   12739 	int i;
   12740 
   12741 	/* Swap the data bytes for the I2C interface */
   12742 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12743 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12744 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12745 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12746 
   12747 	/* Poll the ready bit */
   12748 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12749 		delay(50);
   12750 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12751 		if (i2ccmd & I2CCMD_READY)
   12752 			break;
   12753 	}
   12754 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12755 		device_printf(dev, "I2CCMD Write did not complete\n");
   12756 		rv = ETIMEDOUT;
   12757 	}
   12758 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12759 		device_printf(dev, "I2CCMD Error bit set\n");
   12760 		rv = EIO;
   12761 	}
   12762 
   12763 	return rv;
   12764 }
   12765 
   12766 /* TBI related */
   12767 
   12768 static bool
   12769 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12770 {
   12771 	bool sig;
   12772 
   12773 	sig = ctrl & CTRL_SWDPIN(1);
   12774 
   12775 	/*
   12776 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12777 	 * detect a signal, 1 if they don't.
   12778 	 */
   12779 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12780 		sig = !sig;
   12781 
   12782 	return sig;
   12783 }
   12784 
   12785 /*
   12786  * wm_tbi_mediainit:
   12787  *
   12788  *	Initialize media for use on 1000BASE-X devices.
   12789  */
   12790 static void
   12791 wm_tbi_mediainit(struct wm_softc *sc)
   12792 {
   12793 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12794 	const char *sep = "";
   12795 
   12796 	if (sc->sc_type < WM_T_82543)
   12797 		sc->sc_tipg = TIPG_WM_DFLT;
   12798 	else
   12799 		sc->sc_tipg = TIPG_LG_DFLT;
   12800 
   12801 	sc->sc_tbi_serdes_anegticks = 5;
   12802 
   12803 	/* Initialize our media structures */
   12804 	sc->sc_mii.mii_ifp = ifp;
   12805 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12806 
   12807 	ifp->if_baudrate = IF_Gbps(1);
   12808 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12809 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12810 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12811 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12812 		    sc->sc_core_lock);
   12813 	} else {
   12814 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12815 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12816 	}
   12817 
   12818 	/*
   12819 	 * SWD Pins:
   12820 	 *
   12821 	 *	0 = Link LED (output)
   12822 	 *	1 = Loss Of Signal (input)
   12823 	 */
   12824 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12825 
   12826 	/* XXX Perhaps this is only for TBI */
   12827 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12828 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12829 
   12830 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12831 		sc->sc_ctrl &= ~CTRL_LRST;
   12832 
   12833 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12834 
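/*
 * ADD() registers one media type with ifmedia and prints its name on
 * the attach line, separating successive entries with commas.
 */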
   12835 #define	ADD(ss, mm, dd)							  \
   12836 do {									  \
   12837 	aprint_normal("%s%s", sep, ss);					  \
   12838 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12839 	sep = ", ";							  \
   12840 } while (/*CONSTCOND*/0)
   12841 
   12842 	aprint_normal_dev(sc->sc_dev, "");
   12843 
   12844 	if (sc->sc_type == WM_T_I354) {
   12845 		uint32_t status;
   12846 
   12847 		status = CSR_READ(sc, WMREG_STATUS);
   12848 		if (((status & STATUS_2P5_SKU) != 0)
   12849 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12850 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12851 		} else
   12852 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12853 	} else if (sc->sc_type == WM_T_82545) {
   12854 		/* Only 82545 is LX (XXX except SFP) */
   12855 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12856 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12857 	} else if (sc->sc_sfptype != 0) {
   12858 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12859 		switch (sc->sc_sfptype) {
   12860 		default:
   12861 		case SFF_SFP_ETH_FLAGS_1000SX:
   12862 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12863 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12864 			break;
   12865 		case SFF_SFP_ETH_FLAGS_1000LX:
   12866 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12867 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12868 			break;
   12869 		case SFF_SFP_ETH_FLAGS_1000CX:
   12870 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12871 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12872 			break;
   12873 		case SFF_SFP_ETH_FLAGS_1000T:
   12874 			ADD("1000baseT", IFM_1000_T, 0);
   12875 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12876 			break;
   12877 		case SFF_SFP_ETH_FLAGS_100FX:
   12878 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12879 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12880 			break;
   12881 		}
   12882 	} else {
   12883 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12884 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12885 	}
   12886 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12887 	aprint_normal("\n");
   12888 
   12889 #undef ADD
   12890 
   12891 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12892 }
   12893 
   12894 /*
   12895  * wm_tbi_mediachange:	[ifmedia interface function]
   12896  *
   12897  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12898  */
   12899 static int
   12900 wm_tbi_mediachange(struct ifnet *ifp)
   12901 {
   12902 	struct wm_softc *sc = ifp->if_softc;
   12903 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12904 	uint32_t status, ctrl;
   12905 	bool signal;
   12906 	int i;
   12907 
   12908 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12909 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12910 		/* XXX need some work for >= 82571 and < 82575 */
   12911 		if (sc->sc_type < WM_T_82575)
   12912 			return 0;
   12913 	}
   12914 
   12915 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12916 	    || (sc->sc_type >= WM_T_82575))
   12917 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12918 
   12919 	sc->sc_ctrl &= ~CTRL_LRST;
   12920 	sc->sc_txcw = TXCW_ANE;
   12921 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12922 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12923 	else if (ife->ifm_media & IFM_FDX)
   12924 		sc->sc_txcw |= TXCW_FD;
   12925 	else
   12926 		sc->sc_txcw |= TXCW_HD;
   12927 
   12928 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12929 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12930 
    12931 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12932 		device_xname(sc->sc_dev), sc->sc_txcw));
   12933 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12934 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12935 	CSR_WRITE_FLUSH(sc);
   12936 	delay(1000);
   12937 
   12938 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12939 	signal = wm_tbi_havesignal(sc, ctrl);
   12940 
   12941 	DPRINTF(sc, WM_DEBUG_LINK,
   12942 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   12943 
   12944 	if (signal) {
   12945 		/* Have signal; wait for the link to come up. */
   12946 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12947 			delay(10000);
   12948 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12949 				break;
   12950 		}
   12951 
   12952 		DPRINTF(sc, WM_DEBUG_LINK,
   12953 		    ("%s: i = %d after waiting for link\n",
   12954 			device_xname(sc->sc_dev), i));
   12955 
   12956 		status = CSR_READ(sc, WMREG_STATUS);
   12957 		DPRINTF(sc, WM_DEBUG_LINK,
   12958 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   12959 			__PRIxBIT "\n",
   12960 			device_xname(sc->sc_dev), status, STATUS_LU));
   12961 		if (status & STATUS_LU) {
   12962 			/* Link is up. */
   12963 			DPRINTF(sc, WM_DEBUG_LINK,
   12964 			    ("%s: LINK: set media -> link up %s\n",
   12965 				device_xname(sc->sc_dev),
   12966 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12967 
   12968 			/*
    12969 			 * NOTE: The hardware updates CTRL's TFCE and RFCE bits
    12970 			 * automatically, so refresh sc->sc_ctrl from the register.
   12971 			 */
   12972 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12973 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12974 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12975 			if (status & STATUS_FD)
   12976 				sc->sc_tctl |=
   12977 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12978 			else
   12979 				sc->sc_tctl |=
   12980 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12981 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12982 				sc->sc_fcrtl |= FCRTL_XONE;
   12983 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12984 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12985 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12986 			sc->sc_tbi_linkup = 1;
   12987 		} else {
   12988 			if (i == WM_LINKUP_TIMEOUT)
   12989 				wm_check_for_link(sc);
   12990 			/* Link is down. */
   12991 			DPRINTF(sc, WM_DEBUG_LINK,
   12992 			    ("%s: LINK: set media -> link down\n",
   12993 				device_xname(sc->sc_dev)));
   12994 			sc->sc_tbi_linkup = 0;
   12995 		}
   12996 	} else {
   12997 		DPRINTF(sc, WM_DEBUG_LINK,
   12998 		    ("%s: LINK: set media -> no signal\n",
   12999 			device_xname(sc->sc_dev)));
   13000 		sc->sc_tbi_linkup = 0;
   13001 	}
   13002 
   13003 	wm_tbi_serdes_set_linkled(sc);
   13004 
   13005 	return 0;
   13006 }
   13007 
   13008 /*
   13009  * wm_tbi_mediastatus:	[ifmedia interface function]
   13010  *
   13011  *	Get the current interface media status on a 1000BASE-X device.
   13012  */
   13013 static void
   13014 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13015 {
   13016 	struct wm_softc *sc = ifp->if_softc;
   13017 	uint32_t ctrl, status;
   13018 
   13019 	ifmr->ifm_status = IFM_AVALID;
   13020 	ifmr->ifm_active = IFM_ETHER;
   13021 
   13022 	status = CSR_READ(sc, WMREG_STATUS);
   13023 	if ((status & STATUS_LU) == 0) {
   13024 		ifmr->ifm_active |= IFM_NONE;
   13025 		return;
   13026 	}
   13027 
   13028 	ifmr->ifm_status |= IFM_ACTIVE;
   13029 	/* Only 82545 is LX */
   13030 	if (sc->sc_type == WM_T_82545)
   13031 		ifmr->ifm_active |= IFM_1000_LX;
   13032 	else
   13033 		ifmr->ifm_active |= IFM_1000_SX;
   13034 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13035 		ifmr->ifm_active |= IFM_FDX;
   13036 	else
   13037 		ifmr->ifm_active |= IFM_HDX;
   13038 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13039 	if (ctrl & CTRL_RFCE)
   13040 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13041 	if (ctrl & CTRL_TFCE)
   13042 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13043 }
   13044 
   13045 /* XXX TBI only */
   13046 static int
   13047 wm_check_for_link(struct wm_softc *sc)
   13048 {
   13049 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13050 	uint32_t rxcw;
   13051 	uint32_t ctrl;
   13052 	uint32_t status;
   13053 	bool signal;
   13054 
   13055 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13056 		device_xname(sc->sc_dev), __func__));
   13057 
   13058 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13059 		/* XXX need some work for >= 82571 */
   13060 		if (sc->sc_type >= WM_T_82571) {
   13061 			sc->sc_tbi_linkup = 1;
   13062 			return 0;
   13063 		}
   13064 	}
   13065 
   13066 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13067 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13068 	status = CSR_READ(sc, WMREG_STATUS);
   13069 	signal = wm_tbi_havesignal(sc, ctrl);
   13070 
   13071 	DPRINTF(sc, WM_DEBUG_LINK,
   13072 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13073 		device_xname(sc->sc_dev), __func__, signal,
   13074 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13075 
   13076 	/*
   13077 	 * SWDPIN   LU RXCW
   13078 	 *	0    0	  0
   13079 	 *	0    0	  1	(should not happen)
   13080 	 *	0    1	  0	(should not happen)
   13081 	 *	0    1	  1	(should not happen)
    13082 	 *	1    0	  0	Disable autonegotiation and force link up
    13083 	 *	1    0	  1	got /C/ but not linkup yet
    13084 	 *	1    1	  0	(linkup)
    13085 	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
   13087 	 */
   13088 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13089 		DPRINTF(sc, WM_DEBUG_LINK,
   13090 		    ("%s: %s: force linkup and fullduplex\n",
   13091 			device_xname(sc->sc_dev), __func__));
   13092 		sc->sc_tbi_linkup = 0;
   13093 		/* Disable auto-negotiation in the TXCW register */
   13094 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13095 
   13096 		/*
   13097 		 * Force link-up and also force full-duplex.
   13098 		 *
    13099 		 * NOTE: The hardware has already updated CTRL's TFCE and
    13100 		 * RFCE bits, so refresh sc->sc_ctrl from the value read above.
   13101 		 */
   13102 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13103 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13104 	} else if (((status & STATUS_LU) != 0)
   13105 	    && ((rxcw & RXCW_C) != 0)
   13106 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13107 		sc->sc_tbi_linkup = 1;
   13108 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13109 			device_xname(sc->sc_dev), __func__));
   13110 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13111 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13112 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    13113 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   13114 			device_xname(sc->sc_dev), __func__));
   13115 	} else {
   13116 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13117 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13118 			status));
   13119 	}
   13120 
   13121 	return 0;
   13122 }
   13123 
   13124 /*
   13125  * wm_tbi_tick:
   13126  *
   13127  *	Check the link on TBI devices.
   13128  *	This function acts as mii_tick().
   13129  */
   13130 static void
   13131 wm_tbi_tick(struct wm_softc *sc)
   13132 {
   13133 	struct mii_data *mii = &sc->sc_mii;
   13134 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13135 	uint32_t status;
   13136 
   13137 	KASSERT(WM_CORE_LOCKED(sc));
   13138 
   13139 	status = CSR_READ(sc, WMREG_STATUS);
   13140 
   13141 	/* XXX is this needed? */
   13142 	(void)CSR_READ(sc, WMREG_RXCW);
   13143 	(void)CSR_READ(sc, WMREG_CTRL);
   13144 
   13145 	/* set link status */
   13146 	if ((status & STATUS_LU) == 0) {
   13147 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13148 			device_xname(sc->sc_dev)));
   13149 		sc->sc_tbi_linkup = 0;
   13150 	} else if (sc->sc_tbi_linkup == 0) {
   13151 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13152 			device_xname(sc->sc_dev),
   13153 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13154 		sc->sc_tbi_linkup = 1;
   13155 		sc->sc_tbi_serdes_ticks = 0;
   13156 	}
   13157 
   13158 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   13159 		goto setled;
   13160 
   13161 	if ((status & STATUS_LU) == 0) {
   13162 		sc->sc_tbi_linkup = 0;
   13163 		/* If the timer expired, retry autonegotiation */
   13164 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13165 		    && (++sc->sc_tbi_serdes_ticks
   13166 			>= sc->sc_tbi_serdes_anegticks)) {
   13167 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13168 				device_xname(sc->sc_dev), __func__));
   13169 			sc->sc_tbi_serdes_ticks = 0;
   13170 			/*
   13171 			 * Reset the link, and let autonegotiation do
   13172 			 * its thing
   13173 			 */
   13174 			sc->sc_ctrl |= CTRL_LRST;
   13175 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13176 			CSR_WRITE_FLUSH(sc);
   13177 			delay(1000);
   13178 			sc->sc_ctrl &= ~CTRL_LRST;
   13179 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13180 			CSR_WRITE_FLUSH(sc);
   13181 			delay(1000);
   13182 			CSR_WRITE(sc, WMREG_TXCW,
   13183 			    sc->sc_txcw & ~TXCW_ANE);
   13184 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13185 		}
   13186 	}
   13187 
   13188 setled:
   13189 	wm_tbi_serdes_set_linkled(sc);
   13190 }
   13191 
   13192 /* SERDES related */
   13193 static void
   13194 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13195 {
   13196 	uint32_t reg;
   13197 
   13198 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13199 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13200 		return;
   13201 
   13202 	/* Enable PCS to turn on link */
   13203 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13204 	reg |= PCS_CFG_PCS_EN;
   13205 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13206 
   13207 	/* Power up the laser */
   13208 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13209 	reg &= ~CTRL_EXT_SWDPIN(3);
   13210 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13211 
   13212 	/* Flush the write to verify completion */
   13213 	CSR_WRITE_FLUSH(sc);
   13214 	delay(1000);
   13215 }
   13216 
   13217 static int
   13218 wm_serdes_mediachange(struct ifnet *ifp)
   13219 {
   13220 	struct wm_softc *sc = ifp->if_softc;
   13221 	bool pcs_autoneg = true; /* XXX */
   13222 	uint32_t ctrl_ext, pcs_lctl, reg;
   13223 
   13224 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13225 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13226 		return 0;
   13227 
   13228 	/* XXX Currently, this function is not called on 8257[12] */
   13229 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13230 	    || (sc->sc_type >= WM_T_82575))
   13231 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13232 
   13233 	/* Power on the sfp cage if present */
   13234 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13235 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13236 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13237 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13238 
   13239 	sc->sc_ctrl |= CTRL_SLU;
   13240 
   13241 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13242 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13243 
   13244 		reg = CSR_READ(sc, WMREG_CONNSW);
   13245 		reg |= CONNSW_ENRGSRC;
   13246 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13247 	}
   13248 
   13249 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13250 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13251 	case CTRL_EXT_LINK_MODE_SGMII:
   13252 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13253 		pcs_autoneg = true;
    13254 		/* Autoneg timeout should be disabled for SGMII mode */
   13255 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13256 		break;
   13257 	case CTRL_EXT_LINK_MODE_1000KX:
   13258 		pcs_autoneg = false;
   13259 		/* FALLTHROUGH */
   13260 	default:
   13261 		if ((sc->sc_type == WM_T_82575)
   13262 		    || (sc->sc_type == WM_T_82576)) {
   13263 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13264 				pcs_autoneg = false;
   13265 		}
   13266 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13267 		    | CTRL_FRCFDX;
   13268 
   13269 		/* Set speed of 1000/Full if speed/duplex is forced */
   13270 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13271 	}
   13272 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13273 
   13274 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13275 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13276 
   13277 	if (pcs_autoneg) {
   13278 		/* Set PCS register for autoneg */
   13279 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13280 
   13281 		/* Disable force flow control for autoneg */
   13282 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13283 
   13284 		/* Configure flow control advertisement for autoneg */
   13285 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13286 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13287 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13288 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13289 	} else
   13290 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13291 
   13292 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13293 
   13294 	return 0;
   13295 }
   13296 
   13297 static void
   13298 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13299 {
   13300 	struct wm_softc *sc = ifp->if_softc;
   13301 	struct mii_data *mii = &sc->sc_mii;
   13302 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13303 	uint32_t pcs_adv, pcs_lpab, reg;
   13304 
   13305 	ifmr->ifm_status = IFM_AVALID;
   13306 	ifmr->ifm_active = IFM_ETHER;
   13307 
   13308 	/* Check PCS */
   13309 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13310 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13311 		ifmr->ifm_active |= IFM_NONE;
   13312 		sc->sc_tbi_linkup = 0;
   13313 		goto setled;
   13314 	}
   13315 
   13316 	sc->sc_tbi_linkup = 1;
   13317 	ifmr->ifm_status |= IFM_ACTIVE;
   13318 	if (sc->sc_type == WM_T_I354) {
   13319 		uint32_t status;
   13320 
   13321 		status = CSR_READ(sc, WMREG_STATUS);
   13322 		if (((status & STATUS_2P5_SKU) != 0)
   13323 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13324 			ifmr->ifm_active |= IFM_2500_KX;
   13325 		} else
   13326 			ifmr->ifm_active |= IFM_1000_KX;
   13327 	} else {
   13328 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13329 		case PCS_LSTS_SPEED_10:
   13330 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13331 			break;
   13332 		case PCS_LSTS_SPEED_100:
   13333 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13334 			break;
   13335 		case PCS_LSTS_SPEED_1000:
   13336 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13337 			break;
   13338 		default:
   13339 			device_printf(sc->sc_dev, "Unknown speed\n");
   13340 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13341 			break;
   13342 		}
   13343 	}
   13344 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13345 	if ((reg & PCS_LSTS_FDX) != 0)
   13346 		ifmr->ifm_active |= IFM_FDX;
   13347 	else
   13348 		ifmr->ifm_active |= IFM_HDX;
   13349 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13350 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13351 		/* Check flow */
   13352 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13353 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13354 			DPRINTF(sc, WM_DEBUG_LINK,
   13355 			    ("XXX LINKOK but not ACOMP\n"));
   13356 			goto setled;
   13357 		}
   13358 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13359 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13360 		DPRINTF(sc, WM_DEBUG_LINK,
   13361 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13362 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13363 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13364 			mii->mii_media_active |= IFM_FLOW
   13365 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13366 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13367 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13368 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13369 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13370 			mii->mii_media_active |= IFM_FLOW
   13371 			    | IFM_ETH_TXPAUSE;
   13372 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13373 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13374 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13375 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13376 			mii->mii_media_active |= IFM_FLOW
   13377 			    | IFM_ETH_RXPAUSE;
   13378 		}
   13379 	}
   13380 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13381 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13382 setled:
   13383 	wm_tbi_serdes_set_linkled(sc);
   13384 }
   13385 
   13386 /*
   13387  * wm_serdes_tick:
   13388  *
   13389  *	Check the link on serdes devices.
   13390  */
   13391 static void
   13392 wm_serdes_tick(struct wm_softc *sc)
   13393 {
   13394 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13395 	struct mii_data *mii = &sc->sc_mii;
   13396 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13397 	uint32_t reg;
   13398 
   13399 	KASSERT(WM_CORE_LOCKED(sc));
   13400 
   13401 	mii->mii_media_status = IFM_AVALID;
   13402 	mii->mii_media_active = IFM_ETHER;
   13403 
   13404 	/* Check PCS */
   13405 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13406 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13407 		mii->mii_media_status |= IFM_ACTIVE;
   13408 		sc->sc_tbi_linkup = 1;
   13409 		sc->sc_tbi_serdes_ticks = 0;
   13410 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13411 		if ((reg & PCS_LSTS_FDX) != 0)
   13412 			mii->mii_media_active |= IFM_FDX;
   13413 		else
   13414 			mii->mii_media_active |= IFM_HDX;
   13415 	} else {
    13416 		mii->mii_media_active |= IFM_NONE;
   13417 		sc->sc_tbi_linkup = 0;
   13418 		/* If the timer expired, retry autonegotiation */
   13419 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13420 		    && (++sc->sc_tbi_serdes_ticks
   13421 			>= sc->sc_tbi_serdes_anegticks)) {
   13422 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13423 				device_xname(sc->sc_dev), __func__));
   13424 			sc->sc_tbi_serdes_ticks = 0;
   13425 			/* XXX */
   13426 			wm_serdes_mediachange(ifp);
   13427 		}
   13428 	}
   13429 
   13430 	wm_tbi_serdes_set_linkled(sc);
   13431 }
   13432 
   13433 /* SFP related */
   13434 
   13435 static int
   13436 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13437 {
   13438 	uint32_t i2ccmd;
   13439 	int i;
   13440 
   13441 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13442 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13443 
   13444 	/* Poll the ready bit */
   13445 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13446 		delay(50);
   13447 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13448 		if (i2ccmd & I2CCMD_READY)
   13449 			break;
   13450 	}
   13451 	if ((i2ccmd & I2CCMD_READY) == 0)
   13452 		return -1;
   13453 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13454 		return -1;
   13455 
   13456 	*data = i2ccmd & 0x00ff;
   13457 
   13458 	return 0;
   13459 }
   13460 
   13461 static uint32_t
   13462 wm_sfp_get_media_type(struct wm_softc *sc)
   13463 {
   13464 	uint32_t ctrl_ext;
   13465 	uint8_t val = 0;
   13466 	int timeout = 3;
   13467 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13468 	int rv = -1;
   13469 
   13470 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13471 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13472 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13473 	CSR_WRITE_FLUSH(sc);
   13474 
   13475 	/* Read SFP module data */
   13476 	while (timeout) {
   13477 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13478 		if (rv == 0)
   13479 			break;
   13480 		delay(100*1000); /* XXX too big */
   13481 		timeout--;
   13482 	}
   13483 	if (rv != 0)
   13484 		goto out;
   13485 
   13486 	switch (val) {
   13487 	case SFF_SFP_ID_SFF:
   13488 		aprint_normal_dev(sc->sc_dev,
   13489 		    "Module/Connector soldered to board\n");
   13490 		break;
   13491 	case SFF_SFP_ID_SFP:
   13492 		sc->sc_flags |= WM_F_SFP;
   13493 		break;
   13494 	case SFF_SFP_ID_UNKNOWN:
   13495 		goto out;
   13496 	default:
   13497 		break;
   13498 	}
   13499 
   13500 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13501 	if (rv != 0)
   13502 		goto out;
   13503 
   13504 	sc->sc_sfptype = val;
   13505 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13506 		mediatype = WM_MEDIATYPE_SERDES;
   13507 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13508 		sc->sc_flags |= WM_F_SGMII;
   13509 		mediatype = WM_MEDIATYPE_COPPER;
   13510 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13511 		sc->sc_flags |= WM_F_SGMII;
   13512 		mediatype = WM_MEDIATYPE_SERDES;
   13513 	} else {
   13514 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13515 		    __func__, sc->sc_sfptype);
   13516 		sc->sc_sfptype = 0; /* XXX unknown */
   13517 	}
   13518 
   13519 out:
   13520 	/* Restore I2C interface setting */
   13521 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13522 
   13523 	return mediatype;
   13524 }
   13525 
   13526 /*
   13527  * NVM related.
   13528  * Microwire, SPI (w/wo EERD) and Flash.
   13529  */
   13530 
   13531 /* Both spi and uwire */
   13532 
   13533 /*
   13534  * wm_eeprom_sendbits:
   13535  *
   13536  *	Send a series of bits to the EEPROM.
   13537  */
   13538 static void
   13539 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13540 {
   13541 	uint32_t reg;
   13542 	int x;
   13543 
   13544 	reg = CSR_READ(sc, WMREG_EECD);
   13545 
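	/* Clock each bit out MSB first: set DI, then pulse SK high and low. */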
   13546 	for (x = nbits; x > 0; x--) {
   13547 		if (bits & (1U << (x - 1)))
   13548 			reg |= EECD_DI;
   13549 		else
   13550 			reg &= ~EECD_DI;
   13551 		CSR_WRITE(sc, WMREG_EECD, reg);
   13552 		CSR_WRITE_FLUSH(sc);
   13553 		delay(2);
   13554 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13555 		CSR_WRITE_FLUSH(sc);
   13556 		delay(2);
   13557 		CSR_WRITE(sc, WMREG_EECD, reg);
   13558 		CSR_WRITE_FLUSH(sc);
   13559 		delay(2);
   13560 	}
   13561 }
   13562 
   13563 /*
   13564  * wm_eeprom_recvbits:
   13565  *
   13566  *	Receive a series of bits from the EEPROM.
   13567  */
   13568 static void
   13569 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13570 {
   13571 	uint32_t reg, val;
   13572 	int x;
   13573 
   13574 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13575 
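	/* Clock each bit in MSB first: raise SK, sample DO, then lower SK. */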
   13576 	val = 0;
   13577 	for (x = nbits; x > 0; x--) {
   13578 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13579 		CSR_WRITE_FLUSH(sc);
   13580 		delay(2);
   13581 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13582 			val |= (1U << (x - 1));
   13583 		CSR_WRITE(sc, WMREG_EECD, reg);
   13584 		CSR_WRITE_FLUSH(sc);
   13585 		delay(2);
   13586 	}
   13587 	*valp = val;
   13588 }
   13589 
   13590 /* Microwire */
   13591 
   13592 /*
   13593  * wm_nvm_read_uwire:
   13594  *
   13595  *	Read a word from the EEPROM using the MicroWire protocol.
   13596  */
   13597 static int
   13598 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13599 {
   13600 	uint32_t reg, val;
   13601 	int i, rv;
   13602 
   13603 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13604 		device_xname(sc->sc_dev), __func__));
   13605 
   13606 	rv = sc->nvm.acquire(sc);
   13607 	if (rv != 0)
   13608 		return rv;
   13609 
   13610 	for (i = 0; i < wordcnt; i++) {
   13611 		/* Clear SK and DI. */
   13612 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13613 		CSR_WRITE(sc, WMREG_EECD, reg);
   13614 
   13615 		/*
   13616 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13617 		 * and Xen.
   13618 		 *
   13619 		 * We use this workaround only for 82540 because qemu's
   13620 		 * e1000 act as 82540.
   13621 		 */
   13622 		if (sc->sc_type == WM_T_82540) {
   13623 			reg |= EECD_SK;
   13624 			CSR_WRITE(sc, WMREG_EECD, reg);
   13625 			reg &= ~EECD_SK;
   13626 			CSR_WRITE(sc, WMREG_EECD, reg);
   13627 			CSR_WRITE_FLUSH(sc);
   13628 			delay(2);
   13629 		}
   13630 		/* XXX: end of workaround */
   13631 
   13632 		/* Set CHIP SELECT. */
   13633 		reg |= EECD_CS;
   13634 		CSR_WRITE(sc, WMREG_EECD, reg);
   13635 		CSR_WRITE_FLUSH(sc);
   13636 		delay(2);
   13637 
   13638 		/* Shift in the READ command. */
   13639 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13640 
   13641 		/* Shift in address. */
   13642 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13643 
   13644 		/* Shift out the data. */
   13645 		wm_eeprom_recvbits(sc, &val, 16);
   13646 		data[i] = val & 0xffff;
   13647 
   13648 		/* Clear CHIP SELECT. */
   13649 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13650 		CSR_WRITE(sc, WMREG_EECD, reg);
   13651 		CSR_WRITE_FLUSH(sc);
   13652 		delay(2);
   13653 	}
   13654 
   13655 	sc->nvm.release(sc);
   13656 	return 0;
   13657 }
   13658 
   13659 /* SPI */
   13660 
   13661 /*
   13662  * Set SPI and FLASH related information from the EECD register.
   13663  * For 82541 and 82547, the word size is taken from EEPROM.
   13664  */
   13665 static int
   13666 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13667 {
   13668 	int size;
   13669 	uint32_t reg;
    13670 	uint16_t data = 0;
   13671 
   13672 	reg = CSR_READ(sc, WMREG_EECD);
   13673 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13674 
   13675 	/* Read the size of NVM from EECD by default */
   13676 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13677 	switch (sc->sc_type) {
   13678 	case WM_T_82541:
   13679 	case WM_T_82541_2:
   13680 	case WM_T_82547:
   13681 	case WM_T_82547_2:
   13682 		/* Set dummy value to access EEPROM */
   13683 		sc->sc_nvm_wordsize = 64;
   13684 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13685 			aprint_error_dev(sc->sc_dev,
   13686 			    "%s: failed to read EEPROM size\n", __func__);
   13687 		}
   13688 		reg = data;
   13689 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13690 		if (size == 0)
   13691 			size = 6; /* 64 word size */
   13692 		else
   13693 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13694 		break;
   13695 	case WM_T_80003:
   13696 	case WM_T_82571:
   13697 	case WM_T_82572:
   13698 	case WM_T_82573: /* SPI case */
   13699 	case WM_T_82574: /* SPI case */
   13700 	case WM_T_82583: /* SPI case */
   13701 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13702 		if (size > 14)
   13703 			size = 14;
   13704 		break;
   13705 	case WM_T_82575:
   13706 	case WM_T_82576:
   13707 	case WM_T_82580:
   13708 	case WM_T_I350:
   13709 	case WM_T_I354:
   13710 	case WM_T_I210:
   13711 	case WM_T_I211:
   13712 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13713 		if (size > 15)
   13714 			size = 15;
   13715 		break;
   13716 	default:
   13717 		aprint_error_dev(sc->sc_dev,
   13718 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13719 		return -1;
   13721 	}
   13722 
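	/* "size" is the log2 of the NVM word count. */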
   13723 	sc->sc_nvm_wordsize = 1 << size;
   13724 
   13725 	return 0;
   13726 }
   13727 
   13728 /*
   13729  * wm_nvm_ready_spi:
   13730  *
   13731  *	Wait for a SPI EEPROM to be ready for commands.
   13732  */
   13733 static int
   13734 wm_nvm_ready_spi(struct wm_softc *sc)
   13735 {
   13736 	uint32_t val;
   13737 	int usec;
   13738 
   13739 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13740 		device_xname(sc->sc_dev), __func__));
   13741 
   13742 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13743 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13744 		wm_eeprom_recvbits(sc, &val, 8);
   13745 		if ((val & SPI_SR_RDY) == 0)
   13746 			break;
   13747 	}
   13748 	if (usec >= SPI_MAX_RETRIES) {
    13749 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   13750 		return -1;
   13751 	}
   13752 	return 0;
   13753 }
   13754 
   13755 /*
   13756  * wm_nvm_read_spi:
   13757  *
    13758  *	Read a word from the EEPROM using the SPI protocol.
   13759  */
   13760 static int
   13761 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13762 {
   13763 	uint32_t reg, val;
   13764 	int i;
   13765 	uint8_t opc;
   13766 	int rv;
   13767 
   13768 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13769 		device_xname(sc->sc_dev), __func__));
   13770 
   13771 	rv = sc->nvm.acquire(sc);
   13772 	if (rv != 0)
   13773 		return rv;
   13774 
   13775 	/* Clear SK and CS. */
   13776 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13777 	CSR_WRITE(sc, WMREG_EECD, reg);
   13778 	CSR_WRITE_FLUSH(sc);
   13779 	delay(2);
   13780 
   13781 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13782 		goto out;
   13783 
   13784 	/* Toggle CS to flush commands. */
   13785 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13786 	CSR_WRITE_FLUSH(sc);
   13787 	delay(2);
   13788 	CSR_WRITE(sc, WMREG_EECD, reg);
   13789 	CSR_WRITE_FLUSH(sc);
   13790 	delay(2);
   13791 
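	/*
	 * Parts with 8-bit addressing use the A8 opcode bit as the ninth
	 * address bit; the EEPROM is byte addressed, hence word << 1 when
	 * the address is shifted in below.
	 */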
   13792 	opc = SPI_OPC_READ;
   13793 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13794 		opc |= SPI_OPC_A8;
   13795 
   13796 	wm_eeprom_sendbits(sc, opc, 8);
   13797 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13798 
   13799 	for (i = 0; i < wordcnt; i++) {
   13800 		wm_eeprom_recvbits(sc, &val, 16);
   13801 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13802 	}
   13803 
   13804 	/* Raise CS and clear SK. */
   13805 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13806 	CSR_WRITE(sc, WMREG_EECD, reg);
   13807 	CSR_WRITE_FLUSH(sc);
   13808 	delay(2);
   13809 
   13810 out:
   13811 	sc->nvm.release(sc);
   13812 	return rv;
   13813 }
   13814 
   13815 /* Using with EERD */
   13816 
   13817 static int
   13818 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13819 {
   13820 	uint32_t attempts = 100000;
   13821 	uint32_t i, reg = 0;
   13822 	int32_t done = -1;
   13823 
   13824 	for (i = 0; i < attempts; i++) {
   13825 		reg = CSR_READ(sc, rw);
   13826 
   13827 		if (reg & EERD_DONE) {
   13828 			done = 0;
   13829 			break;
   13830 		}
   13831 		delay(5);
   13832 	}
   13833 
   13834 	return done;
   13835 }
   13836 
   13837 static int
   13838 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13839 {
   13840 	int i, eerd = 0;
   13841 	int rv;
   13842 
   13843 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13844 		device_xname(sc->sc_dev), __func__));
   13845 
   13846 	rv = sc->nvm.acquire(sc);
   13847 	if (rv != 0)
   13848 		return rv;
   13849 
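	/*
	 * For each word: write the address with the START bit set, poll
	 * until DONE, then pull the result out of the data field.
	 */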
   13850 	for (i = 0; i < wordcnt; i++) {
   13851 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13852 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13853 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13854 		if (rv != 0) {
   13855 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13856 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13857 			break;
   13858 		}
   13859 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13860 	}
   13861 
   13862 	sc->nvm.release(sc);
   13863 	return rv;
   13864 }
   13865 
   13866 /* Flash */
   13867 
   13868 static int
   13869 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13870 {
   13871 	uint32_t eecd;
   13872 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13873 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13874 	uint32_t nvm_dword = 0;
   13875 	uint8_t sig_byte = 0;
   13876 	int rv;
   13877 
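	/*
	 * The bank signature lives in the upper byte of word
	 * ICH_NVM_SIG_WORD; a bank is valid when its signature bits
	 * match ICH_NVM_SIG_VALUE.
	 */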
   13878 	switch (sc->sc_type) {
   13879 	case WM_T_PCH_SPT:
   13880 	case WM_T_PCH_CNP:
   13881 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13882 		act_offset = ICH_NVM_SIG_WORD * 2;
   13883 
   13884 		/* Set bank to 0 in case flash read fails. */
   13885 		*bank = 0;
   13886 
   13887 		/* Check bank 0 */
   13888 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13889 		if (rv != 0)
   13890 			return rv;
   13891 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13892 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13893 			*bank = 0;
   13894 			return 0;
   13895 		}
   13896 
   13897 		/* Check bank 1 */
    13898 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13899 		    &nvm_dword);
		if (rv != 0)
			return rv;
   13900 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13901 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13902 			*bank = 1;
   13903 			return 0;
   13904 		}
   13905 		aprint_error_dev(sc->sc_dev,
   13906 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13907 		return -1;
   13908 	case WM_T_ICH8:
   13909 	case WM_T_ICH9:
   13910 		eecd = CSR_READ(sc, WMREG_EECD);
   13911 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13912 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13913 			return 0;
   13914 		}
   13915 		/* FALLTHROUGH */
   13916 	default:
   13917 		/* Default to 0 */
   13918 		*bank = 0;
   13919 
   13920 		/* Check bank 0 */
   13921 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13922 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13923 			*bank = 0;
   13924 			return 0;
   13925 		}
   13926 
   13927 		/* Check bank 1 */
   13928 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13929 		    &sig_byte);
   13930 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13931 			*bank = 1;
   13932 			return 0;
   13933 		}
   13934 	}
   13935 
   13936 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13937 		device_xname(sc->sc_dev)));
   13938 	return -1;
   13939 }
   13940 
   13941 /******************************************************************************
   13942  * This function does initial flash setup so that a new read/write/erase cycle
   13943  * can be started.
   13944  *
   13945  * sc - The pointer to the hw structure
   13946  ****************************************************************************/
   13947 static int32_t
   13948 wm_ich8_cycle_init(struct wm_softc *sc)
   13949 {
   13950 	uint16_t hsfsts;
   13951 	int32_t error = 1;
   13952 	int32_t i     = 0;
   13953 
   13954 	if (sc->sc_type >= WM_T_PCH_SPT)
   13955 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13956 	else
   13957 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13958 
    13959 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   13960 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13961 		return error;
   13962 
    13963 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   13965 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13966 
   13967 	if (sc->sc_type >= WM_T_PCH_SPT)
   13968 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13969 	else
   13970 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13971 
   13972 	/*
    13973 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13974 	 * check against in order to start a new cycle, or the FDONE bit
    13975 	 * should be changed in the hardware so that it is 1 after hardware
    13976 	 * reset, which could then be used to tell whether a cycle is in
    13977 	 * progress or has completed.  We should also have some software
    13978 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    13979 	 * so that two threads' accesses to those bits are serialized, or
    13980 	 * a way to keep two threads from starting a cycle at the same time.
   13981 	 */
   13982 
   13983 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13984 		/*
   13985 		 * There is no cycle running at present, so we can start a
   13986 		 * cycle
   13987 		 */
   13988 
   13989 		/* Begin by setting Flash Cycle Done. */
   13990 		hsfsts |= HSFSTS_DONE;
   13991 		if (sc->sc_type >= WM_T_PCH_SPT)
   13992 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13993 			    hsfsts & 0xffffUL);
   13994 		else
   13995 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13996 		error = 0;
   13997 	} else {
   13998 		/*
    13999 		 * Otherwise poll for some time so the current cycle has a
   14000 		 * chance to end before giving up.
   14001 		 */
   14002 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14003 			if (sc->sc_type >= WM_T_PCH_SPT)
   14004 				hsfsts = ICH8_FLASH_READ32(sc,
   14005 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14006 			else
   14007 				hsfsts = ICH8_FLASH_READ16(sc,
   14008 				    ICH_FLASH_HSFSTS);
   14009 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14010 				error = 0;
   14011 				break;
   14012 			}
   14013 			delay(1);
   14014 		}
   14015 		if (error == 0) {
   14016 			/*
    14017 			 * The previous cycle finished within the timeout;
    14018 			 * now set the Flash Cycle Done bit.
   14019 			 */
   14020 			hsfsts |= HSFSTS_DONE;
   14021 			if (sc->sc_type >= WM_T_PCH_SPT)
   14022 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14023 				    hsfsts & 0xffffUL);
   14024 			else
   14025 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14026 				    hsfsts);
   14027 		}
   14028 	}
   14029 	return error;
   14030 }
   14031 
   14032 /******************************************************************************
   14033  * This function starts a flash cycle and waits for its completion
   14034  *
   14035  * sc - The pointer to the hw structure
   14036  ****************************************************************************/
   14037 static int32_t
   14038 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14039 {
   14040 	uint16_t hsflctl;
   14041 	uint16_t hsfsts;
   14042 	int32_t error = 1;
   14043 	uint32_t i = 0;
   14044 
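	/*
	 * On PCH_SPT and newer, HSFSTS and HSFCTL form a single 32-bit
	 * register: HSFSTS in the low 16 bits, HSFCTL in the high 16.
	 * Hence the 32-bit accesses with shifts below.
	 */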
   14045 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14046 	if (sc->sc_type >= WM_T_PCH_SPT)
   14047 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14048 	else
   14049 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14050 	hsflctl |= HSFCTL_GO;
   14051 	if (sc->sc_type >= WM_T_PCH_SPT)
   14052 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14053 		    (uint32_t)hsflctl << 16);
   14054 	else
   14055 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14056 
   14057 	/* Wait till FDONE bit is set to 1 */
   14058 	do {
   14059 		if (sc->sc_type >= WM_T_PCH_SPT)
   14060 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14061 			    & 0xffffUL;
   14062 		else
   14063 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14064 		if (hsfsts & HSFSTS_DONE)
   14065 			break;
   14066 		delay(1);
   14067 		i++;
   14068 	} while (i < timeout);
    14069 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   14070 		error = 0;
   14071 
   14072 	return error;
   14073 }
   14074 
   14075 /******************************************************************************
   14076  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14077  *
   14078  * sc - The pointer to the hw structure
   14079  * index - The index of the byte or word to read.
    14080  * size - Size of data to read, 1=byte, 2=word, 4=dword
   14081  * data - Pointer to the word to store the value read.
   14082  *****************************************************************************/
   14083 static int32_t
   14084 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14085     uint32_t size, uint32_t *data)
   14086 {
   14087 	uint16_t hsfsts;
   14088 	uint16_t hsflctl;
   14089 	uint32_t flash_linear_address;
   14090 	uint32_t flash_data = 0;
   14091 	int32_t error = 1;
   14092 	int32_t count = 0;
   14093 
    14094 	if (size < 1 || size > 4 || data == NULL ||
   14095 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14096 		return error;
   14097 
   14098 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14099 	    sc->sc_ich8_flash_base;
   14100 
   14101 	do {
   14102 		delay(1);
   14103 		/* Steps */
   14104 		error = wm_ich8_cycle_init(sc);
   14105 		if (error)
   14106 			break;
   14107 
   14108 		if (sc->sc_type >= WM_T_PCH_SPT)
   14109 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14110 			    >> 16;
   14111 		else
   14112 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14113 		/* BCOUNT = size - 1: 0b/1b for 1/2-byte, 11b for dword. */
   14114 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14115 		    & HSFCTL_BCOUNT_MASK;
   14116 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14117 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14118 			/*
    14119 			 * In SPT, this register is in LAN memory space, not
    14120 			 * flash, so only 32-bit access is supported.
   14121 			 */
   14122 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14123 			    (uint32_t)hsflctl << 16);
   14124 		} else
   14125 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14126 
   14127 		/*
   14128 		 * Write the last 24 bits of index into Flash Linear address
   14129 		 * field in Flash Address
   14130 		 */
    14131 		/* TODO: maybe check the index against the size of the flash */
   14132 
   14133 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14134 
   14135 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14136 
   14137 		/*
    14138 		 * If FCERR is set, clear it and retry the whole sequence a
    14139 		 * few more times; otherwise read the result from Flash Data0,
    14140 		 * which returns the data least-significant byte first.
   14142 		 */
   14143 		if (error == 0) {
   14144 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14145 			if (size == 1)
   14146 				*data = (uint8_t)(flash_data & 0x000000FF);
   14147 			else if (size == 2)
   14148 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14149 			else if (size == 4)
   14150 				*data = (uint32_t)flash_data;
   14151 			break;
   14152 		} else {
   14153 			/*
   14154 			 * If we've gotten here, then things are probably
   14155 			 * completely hosed, but if the error condition is
   14156 			 * detected, it won't hurt to give it another try...
   14157 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14158 			 */
   14159 			if (sc->sc_type >= WM_T_PCH_SPT)
   14160 				hsfsts = ICH8_FLASH_READ32(sc,
   14161 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14162 			else
   14163 				hsfsts = ICH8_FLASH_READ16(sc,
   14164 				    ICH_FLASH_HSFSTS);
   14165 
   14166 			if (hsfsts & HSFSTS_ERR) {
   14167 				/* Repeat for some time before giving up. */
   14168 				continue;
   14169 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14170 				break;
   14171 		}
   14172 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14173 
   14174 	return error;
   14175 }
   14176 
   14177 /******************************************************************************
   14178  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14179  *
   14180  * sc - pointer to wm_hw structure
   14181  * index - The index of the byte to read.
   14182  * data - Pointer to a byte to store the value read.
   14183  *****************************************************************************/
   14184 static int32_t
   14185 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14186 {
   14187 	int32_t status;
   14188 	uint32_t word = 0;
   14189 
   14190 	status = wm_read_ich8_data(sc, index, 1, &word);
   14191 	if (status == 0)
   14192 		*data = (uint8_t)word;
   14193 	else
   14194 		*data = 0;
   14195 
   14196 	return status;
   14197 }
   14198 
   14199 /******************************************************************************
   14200  * Reads a word from the NVM using the ICH8 flash access registers.
   14201  *
   14202  * sc - pointer to wm_hw structure
   14203  * index - The starting byte index of the word to read.
   14204  * data - Pointer to a word to store the value read.
   14205  *****************************************************************************/
   14206 static int32_t
   14207 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14208 {
   14209 	int32_t status;
   14210 	uint32_t word = 0;
   14211 
   14212 	status = wm_read_ich8_data(sc, index, 2, &word);
   14213 	if (status == 0)
   14214 		*data = (uint16_t)word;
   14215 	else
   14216 		*data = 0;
   14217 
   14218 	return status;
   14219 }
   14220 
   14221 /******************************************************************************
   14222  * Reads a dword from the NVM using the ICH8 flash access registers.
   14223  *
    14224  * sc - pointer to wm_softc structure
    14225  * index - The starting byte index of the dword to read.
    14226  * data - Pointer to a dword to store the value read.
   14227  *****************************************************************************/
   14228 static int32_t
   14229 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14230 {
   14231 	int32_t status;
   14232 
   14233 	status = wm_read_ich8_data(sc, index, 4, data);
   14234 	return status;
   14235 }
   14236 
   14237 /******************************************************************************
   14238  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14239  * register.
   14240  *
   14241  * sc - Struct containing variables accessed by shared code
   14242  * offset - offset of word in the EEPROM to read
   14243  * data - word read from the EEPROM
   14244  * words - number of words to read
   14245  *****************************************************************************/
   14246 static int
   14247 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14248 {
   14249 	int rv;
   14250 	uint32_t flash_bank = 0;
   14251 	uint32_t act_offset = 0;
   14252 	uint32_t bank_offset = 0;
   14253 	uint16_t word = 0;
   14254 	uint16_t i = 0;
   14255 
   14256 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14257 		device_xname(sc->sc_dev), __func__));
   14258 
   14259 	rv = sc->nvm.acquire(sc);
   14260 	if (rv != 0)
   14261 		return rv;
   14262 
   14263 	/*
   14264 	 * We need to know which is the valid flash bank.  In the event
   14265 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14266 	 * managing flash_bank. So it cannot be trusted and needs
   14267 	 * to be updated with each read.
   14268 	 */
   14269 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14270 	if (rv) {
   14271 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14272 			device_xname(sc->sc_dev)));
   14273 		flash_bank = 0;
   14274 	}
   14275 
   14276 	/*
    14277 	 * Adjust the offset if we're on bank 1.  The bank size is kept
    14278 	 * in words, and bank_offset is a byte offset, hence the * 2.
   14279 	 */
   14280 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14281 
   14282 	for (i = 0; i < words; i++) {
   14283 		/* The NVM part needs a byte offset, hence * 2 */
   14284 		act_offset = bank_offset + ((offset + i) * 2);
   14285 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14286 		if (rv) {
   14287 			aprint_error_dev(sc->sc_dev,
   14288 			    "%s: failed to read NVM\n", __func__);
   14289 			break;
   14290 		}
   14291 		data[i] = word;
   14292 	}
   14293 
   14294 	sc->nvm.release(sc);
   14295 	return rv;
   14296 }
   14297 
   14298 /******************************************************************************
   14299  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14300  * register.
   14301  *
   14302  * sc - Struct containing variables accessed by shared code
   14303  * offset - offset of word in the EEPROM to read
   14304  * data - word read from the EEPROM
   14305  * words - number of words to read
   14306  *****************************************************************************/
   14307 static int
   14308 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14309 {
   14310 	int	 rv;
   14311 	uint32_t flash_bank = 0;
   14312 	uint32_t act_offset = 0;
   14313 	uint32_t bank_offset = 0;
   14314 	uint32_t dword = 0;
   14315 	uint16_t i = 0;
   14316 
   14317 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14318 		device_xname(sc->sc_dev), __func__));
   14319 
   14320 	rv = sc->nvm.acquire(sc);
   14321 	if (rv != 0)
   14322 		return rv;
   14323 
   14324 	/*
   14325 	 * We need to know which is the valid flash bank.  In the event
   14326 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14327 	 * managing flash_bank. So it cannot be trusted and needs
   14328 	 * to be updated with each read.
   14329 	 */
   14330 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14331 	if (rv) {
   14332 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14333 			device_xname(sc->sc_dev)));
   14334 		flash_bank = 0;
   14335 	}
   14336 
   14337 	/*
    14338 	 * Adjust the offset if we're on bank 1.  The bank size is kept
    14339 	 * in words, and bank_offset is a byte offset, hence the * 2.
   14340 	 */
   14341 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14342 
   14343 	for (i = 0; i < words; i++) {
   14344 		/* The NVM part needs a byte offset, hence * 2 */
   14345 		act_offset = bank_offset + ((offset + i) * 2);
   14346 		/* but we must read dword aligned, so mask ... */
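          		/* (e.g. act_offset 0x06 -> read the dword at 0x04
          		 *  and pick out its high word below) */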
   14347 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14348 		if (rv) {
   14349 			aprint_error_dev(sc->sc_dev,
   14350 			    "%s: failed to read NVM\n", __func__);
   14351 			break;
   14352 		}
   14353 		/* ... and pick out low or high word */
   14354 		if ((act_offset & 0x2) == 0)
   14355 			data[i] = (uint16_t)(dword & 0xFFFF);
   14356 		else
   14357 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14358 	}
   14359 
   14360 	sc->nvm.release(sc);
   14361 	return rv;
   14362 }
   14363 
   14364 /* iNVM */
   14365 
   14366 static int
   14367 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14368 {
    14369 	int32_t	 rv = -1;	/* -1 = address not (yet) found */
   14370 	uint32_t invm_dword;
   14371 	uint16_t i;
   14372 	uint8_t record_type, word_address;
   14373 
   14374 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14375 		device_xname(sc->sc_dev), __func__));
   14376 
   14377 	for (i = 0; i < INVM_SIZE; i++) {
   14378 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14379 		/* Get record type */
   14380 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14381 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14382 			break;
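          		/* Skip over the payload of record types we don't parse */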
   14383 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14384 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14385 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14386 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14387 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14388 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14389 			if (word_address == address) {
   14390 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14391 				rv = 0;
   14392 				break;
   14393 			}
   14394 		}
   14395 	}
   14396 
   14397 	return rv;
   14398 }
   14399 
   14400 static int
   14401 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14402 {
   14403 	int i, rv;
   14404 
   14405 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14406 		device_xname(sc->sc_dev), __func__));
   14407 
   14408 	rv = sc->nvm.acquire(sc);
   14409 	if (rv != 0)
   14410 		return rv;
   14411 
   14412 	for (i = 0; i < words; i++) {
   14413 		switch (offset + i) {
   14414 		case NVM_OFF_MACADDR:
   14415 		case NVM_OFF_MACADDR1:
   14416 		case NVM_OFF_MACADDR2:
   14417 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14418 			if (rv != 0) {
   14419 				data[i] = 0xffff;
   14420 				rv = -1;
   14421 			}
   14422 			break;
   14423 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14424 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14425 			if (rv != 0) {
   14426 				*data = INVM_DEFAULT_AL;
   14427 				rv = 0;
   14428 			}
   14429 			break;
   14430 		case NVM_OFF_CFG2:
   14431 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14432 			if (rv != 0) {
   14433 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14434 				rv = 0;
   14435 			}
   14436 			break;
   14437 		case NVM_OFF_CFG4:
   14438 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14439 			if (rv != 0) {
   14440 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14441 				rv = 0;
   14442 			}
   14443 			break;
   14444 		case NVM_OFF_LED_1_CFG:
   14445 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14446 			if (rv != 0) {
   14447 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14448 				rv = 0;
   14449 			}
   14450 			break;
   14451 		case NVM_OFF_LED_0_2_CFG:
   14452 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14453 			if (rv != 0) {
   14454 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14455 				rv = 0;
   14456 			}
   14457 			break;
   14458 		case NVM_OFF_ID_LED_SETTINGS:
   14459 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14460 			if (rv != 0) {
   14461 				*data = ID_LED_RESERVED_FFFF;
   14462 				rv = 0;
   14463 			}
   14464 			break;
   14465 		default:
   14466 			DPRINTF(sc, WM_DEBUG_NVM,
   14467 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14468 			*data = NVM_RESERVED_WORD;
   14469 			break;
   14470 		}
   14471 	}
   14472 
   14473 	sc->nvm.release(sc);
   14474 	return rv;
   14475 }
   14476 
    14477 /* Locking, NVM type detection, checksum validation, version and read */
   14478 
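          /*
           * wm_nvm_is_onboard_eeprom:
           *
           *	Return 1 if the NVM is an on-board EEPROM, or 0 if the device
           *	is a Flash type.  Only 82573/82574/82583 can be Flash here;
           *	both EECD bits 15 and 16 set means Flash.
           */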
   14479 static int
   14480 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14481 {
   14482 	uint32_t eecd = 0;
   14483 
   14484 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14485 	    || sc->sc_type == WM_T_82583) {
   14486 		eecd = CSR_READ(sc, WMREG_EECD);
   14487 
   14488 		/* Isolate bits 15 & 16 */
   14489 		eecd = ((eecd >> 15) & 0x03);
   14490 
   14491 		/* If both bits are set, device is Flash type */
   14492 		if (eecd == 0x03)
   14493 			return 0;
   14494 	}
   14495 	return 1;
   14496 }
   14497 
   14498 static int
   14499 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14500 {
   14501 	uint32_t eec;
   14502 
   14503 	eec = CSR_READ(sc, WMREG_EEC);
   14504 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14505 		return 1;
   14506 
   14507 	return 0;
   14508 }
   14509 
   14510 /*
   14511  * wm_nvm_validate_checksum
   14512  *
   14513  * The checksum is defined as the sum of the first 64 (16 bit) words.
   14514  */
   14515 static int
   14516 wm_nvm_validate_checksum(struct wm_softc *sc)
   14517 {
   14518 	uint16_t checksum;
   14519 	uint16_t eeprom_data;
   14520 #ifdef WM_DEBUG
   14521 	uint16_t csum_wordaddr, valid_checksum;
   14522 #endif
   14523 	int i;
   14524 
   14525 	checksum = 0;
   14526 
   14527 	/* Don't check for I211 */
   14528 	if (sc->sc_type == WM_T_I211)
   14529 		return 0;
   14530 
   14531 #ifdef WM_DEBUG
   14532 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14533 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14534 		csum_wordaddr = NVM_OFF_COMPAT;
   14535 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14536 	} else {
   14537 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14538 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14539 	}
   14540 
   14541 	/* Dump EEPROM image for debug */
   14542 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14543 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14544 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14545 		/* XXX PCH_SPT? */
   14546 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14547 		if ((eeprom_data & valid_checksum) == 0)
   14548 			DPRINTF(sc, WM_DEBUG_NVM,
   14549 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14550 				device_xname(sc->sc_dev), eeprom_data,
   14551 				    valid_checksum));
   14552 	}
   14553 
   14554 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14555 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14556 		for (i = 0; i < NVM_SIZE; i++) {
   14557 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14558 				printf("XXXX ");
   14559 			else
   14560 				printf("%04hx ", eeprom_data);
   14561 			if (i % 8 == 7)
   14562 				printf("\n");
   14563 		}
   14564 	}
   14565 
   14566 #endif /* WM_DEBUG */
   14567 
   14568 	for (i = 0; i < NVM_SIZE; i++) {
   14569 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14570 			return 1;
   14571 		checksum += eeprom_data;
   14572 	}
   14573 
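          	/*
          	 * A checksum mismatch is only reported, and only under
          	 * WM_DEBUG; we return success regardless.
          	 */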
   14574 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14575 #ifdef WM_DEBUG
   14576 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14577 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14578 #endif
   14579 	}
   14580 
   14581 	return 0;
   14582 }
   14583 
   14584 static void
   14585 wm_nvm_version_invm(struct wm_softc *sc)
   14586 {
   14587 	uint32_t dword;
   14588 
   14589 	/*
    14590 	 * Linux's code to decode the version is very strange, so we don't
    14591 	 * follow that algorithm and just use word 61 as the document
    14592 	 * describes.  Perhaps it's not perfect, though...
   14593 	 *
   14594 	 * Example:
   14595 	 *
   14596 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14597 	 */
   14598 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14599 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14600 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14601 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14602 }
   14603 
   14604 static void
   14605 wm_nvm_version(struct wm_softc *sc)
   14606 {
   14607 	uint16_t major, minor, build, patch;
   14608 	uint16_t uid0, uid1;
   14609 	uint16_t nvm_data;
   14610 	uint16_t off;
   14611 	bool check_version = false;
   14612 	bool check_optionrom = false;
   14613 	bool have_build = false;
   14614 	bool have_uid = true;
   14615 
   14616 	/*
   14617 	 * Version format:
   14618 	 *
   14619 	 * XYYZ
   14620 	 * X0YZ
   14621 	 * X0YY
   14622 	 *
   14623 	 * Example:
   14624 	 *
   14625 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14626 	 *	82571	0x50a6	5.10.6?
   14627 	 *	82572	0x506a	5.6.10?
   14628 	 *	82572EI	0x5069	5.6.9?
   14629 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14630 	 *		0x2013	2.1.3?
   14631 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14632 	 * ICH8+82567	0x0040	0.4.0?
   14633 	 * ICH9+82566	0x1040	1.4.0?
   14634 	 *ICH10+82567	0x0043	0.4.3?
   14635 	 *  PCH+82577	0x00c1	0.12.1?
   14636 	 * PCH2+82579	0x00d3	0.13.3?
   14637 	 *		0x00d4	0.13.4?
   14638 	 *  LPT+I218	0x0023	0.2.3?
   14639 	 *  SPT+I219	0x0084	0.8.4?
   14640 	 *  CNP+I219	0x0054	0.5.4?
   14641 	 */
   14642 
   14643 	/*
   14644 	 * XXX
   14645 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    14646 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14647 	 */
   14648 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14649 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14650 		have_uid = false;
   14651 
   14652 	switch (sc->sc_type) {
   14653 	case WM_T_82571:
   14654 	case WM_T_82572:
   14655 	case WM_T_82574:
   14656 	case WM_T_82583:
   14657 		check_version = true;
   14658 		check_optionrom = true;
   14659 		have_build = true;
   14660 		break;
   14661 	case WM_T_ICH8:
   14662 	case WM_T_ICH9:
   14663 	case WM_T_ICH10:
   14664 	case WM_T_PCH:
   14665 	case WM_T_PCH2:
   14666 	case WM_T_PCH_LPT:
   14667 	case WM_T_PCH_SPT:
   14668 	case WM_T_PCH_CNP:
   14669 		check_version = true;
   14670 		have_build = true;
   14671 		have_uid = false;
   14672 		break;
   14673 	case WM_T_82575:
   14674 	case WM_T_82576:
   14675 	case WM_T_82580:
   14676 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14677 			check_version = true;
   14678 		break;
   14679 	case WM_T_I211:
   14680 		wm_nvm_version_invm(sc);
   14681 		have_uid = false;
   14682 		goto printver;
   14683 	case WM_T_I210:
   14684 		if (!wm_nvm_flash_presence_i210(sc)) {
   14685 			wm_nvm_version_invm(sc);
   14686 			have_uid = false;
   14687 			goto printver;
   14688 		}
   14689 		/* FALLTHROUGH */
   14690 	case WM_T_I350:
   14691 	case WM_T_I354:
   14692 		check_version = true;
   14693 		check_optionrom = true;
   14694 		break;
   14695 	default:
   14696 		return;
   14697 	}
   14698 	if (check_version
   14699 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14700 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14701 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14702 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14703 			build = nvm_data & NVM_BUILD_MASK;
   14704 			have_build = true;
   14705 		} else
   14706 			minor = nvm_data & 0x00ff;
   14707 
    14708 		/* The minor field is hex-coded decimal: e.g. 0x10 -> 10 */
   14709 		minor = (minor / 16) * 10 + (minor % 16);
   14710 		sc->sc_nvm_ver_major = major;
   14711 		sc->sc_nvm_ver_minor = minor;
   14712 
   14713 printver:
   14714 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14715 		    sc->sc_nvm_ver_minor);
   14716 		if (have_build) {
   14717 			sc->sc_nvm_ver_build = build;
   14718 			aprint_verbose(".%d", build);
   14719 		}
   14720 	}
   14721 
    14722 	/* Assume the Option ROM area is above NVM_SIZE */
   14723 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14724 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14725 		/* Option ROM Version */
   14726 		if ((off != 0x0000) && (off != 0xffff)) {
   14727 			int rv;
   14728 
   14729 			off += NVM_COMBO_VER_OFF;
   14730 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14731 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14732 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14733 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14734 				/* 16bits */
   14735 				major = uid0 >> 8;
   14736 				build = (uid0 << 8) | (uid1 >> 8);
   14737 				patch = uid1 & 0x00ff;
   14738 				aprint_verbose(", option ROM Version %d.%d.%d",
   14739 				    major, build, patch);
   14740 			}
   14741 		}
   14742 	}
   14743 
   14744 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14745 		aprint_verbose(", Image Unique ID %08x",
   14746 		    ((uint32_t)uid1 << 16) | uid0);
   14747 }
   14748 
   14749 /*
   14750  * wm_nvm_read:
   14751  *
   14752  *	Read data from the serial EEPROM.
   14753  */
   14754 static int
   14755 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14756 {
   14757 	int rv;
   14758 
   14759 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14760 		device_xname(sc->sc_dev), __func__));
   14761 
   14762 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14763 		return -1;
   14764 
   14765 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14766 
   14767 	return rv;
   14768 }
   14769 
   14770 /*
   14771  * Hardware semaphores.
    14772  * Very complex...
   14773  */
   14774 
   14775 static int
   14776 wm_get_null(struct wm_softc *sc)
   14777 {
   14778 
   14779 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14780 		device_xname(sc->sc_dev), __func__));
   14781 	return 0;
   14782 }
   14783 
   14784 static void
   14785 wm_put_null(struct wm_softc *sc)
   14786 {
   14787 
   14788 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14789 		device_xname(sc->sc_dev), __func__));
   14790 	return;
   14791 }
   14792 
   14793 static int
   14794 wm_get_eecd(struct wm_softc *sc)
   14795 {
   14796 	uint32_t reg;
   14797 	int x;
   14798 
   14799 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14800 		device_xname(sc->sc_dev), __func__));
   14801 
   14802 	reg = CSR_READ(sc, WMREG_EECD);
   14803 
   14804 	/* Request EEPROM access. */
   14805 	reg |= EECD_EE_REQ;
   14806 	CSR_WRITE(sc, WMREG_EECD, reg);
   14807 
    14808 	/* ... and wait for it to be granted. */
   14809 	for (x = 0; x < 1000; x++) {
   14810 		reg = CSR_READ(sc, WMREG_EECD);
   14811 		if (reg & EECD_EE_GNT)
   14812 			break;
   14813 		delay(5);
   14814 	}
   14815 	if ((reg & EECD_EE_GNT) == 0) {
   14816 		aprint_error_dev(sc->sc_dev,
   14817 		    "could not acquire EEPROM GNT\n");
   14818 		reg &= ~EECD_EE_REQ;
   14819 		CSR_WRITE(sc, WMREG_EECD, reg);
   14820 		return -1;
   14821 	}
   14822 
   14823 	return 0;
   14824 }
   14825 
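          /*
           * Raise the EEPROM clock (EECD SK bit).  SPI devices need only
           * ~1us between clock edges; Microwire devices need ~50us.
           */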
   14826 static void
   14827 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14828 {
   14829 
   14830 	*eecd |= EECD_SK;
   14831 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14832 	CSR_WRITE_FLUSH(sc);
   14833 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14834 		delay(1);
   14835 	else
   14836 		delay(50);
   14837 }
   14838 
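          /* Lower the EEPROM clock (EECD SK bit); same timing as above. */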
   14839 static void
   14840 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14841 {
   14842 
   14843 	*eecd &= ~EECD_SK;
   14844 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14845 	CSR_WRITE_FLUSH(sc);
   14846 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14847 		delay(1);
   14848 	else
   14849 		delay(50);
   14850 }
   14851 
   14852 static void
   14853 wm_put_eecd(struct wm_softc *sc)
   14854 {
   14855 	uint32_t reg;
   14856 
   14857 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14858 		device_xname(sc->sc_dev), __func__));
   14859 
   14860 	/* Stop nvm */
   14861 	reg = CSR_READ(sc, WMREG_EECD);
   14862 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14863 		/* Pull CS high */
   14864 		reg |= EECD_CS;
   14865 		wm_nvm_eec_clock_lower(sc, &reg);
   14866 	} else {
   14867 		/* CS on Microwire is active-high */
   14868 		reg &= ~(EECD_CS | EECD_DI);
   14869 		CSR_WRITE(sc, WMREG_EECD, reg);
   14870 		wm_nvm_eec_clock_raise(sc, &reg);
   14871 		wm_nvm_eec_clock_lower(sc, &reg);
   14872 	}
   14873 
   14874 	reg = CSR_READ(sc, WMREG_EECD);
   14875 	reg &= ~EECD_EE_REQ;
   14876 	CSR_WRITE(sc, WMREG_EECD, reg);
   14877 
   14878 	return;
   14879 }
   14880 
   14881 /*
   14882  * Get hardware semaphore.
   14883  * Same as e1000_get_hw_semaphore_generic()
   14884  */
   14885 static int
   14886 wm_get_swsm_semaphore(struct wm_softc *sc)
   14887 {
   14888 	int32_t timeout;
   14889 	uint32_t swsm;
   14890 
   14891 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14892 		device_xname(sc->sc_dev), __func__));
   14893 	KASSERT(sc->sc_nvm_wordsize > 0);
   14894 
   14895 retry:
   14896 	/* Get the SW semaphore. */
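          	/* Each retry polls SMBI and then waits 50us, so the total
          	 * wait scales with the NVM size. */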
   14897 	timeout = sc->sc_nvm_wordsize + 1;
   14898 	while (timeout) {
   14899 		swsm = CSR_READ(sc, WMREG_SWSM);
   14900 
   14901 		if ((swsm & SWSM_SMBI) == 0)
   14902 			break;
   14903 
   14904 		delay(50);
   14905 		timeout--;
   14906 	}
   14907 
   14908 	if (timeout == 0) {
   14909 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14910 			/*
   14911 			 * In rare circumstances, the SW semaphore may already
   14912 			 * be held unintentionally. Clear the semaphore once
   14913 			 * before giving up.
   14914 			 */
   14915 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14916 			wm_put_swsm_semaphore(sc);
   14917 			goto retry;
   14918 		}
   14919 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   14920 		return 1;
   14921 	}
   14922 
   14923 	/* Get the FW semaphore. */
   14924 	timeout = sc->sc_nvm_wordsize + 1;
   14925 	while (timeout) {
   14926 		swsm = CSR_READ(sc, WMREG_SWSM);
   14927 		swsm |= SWSM_SWESMBI;
   14928 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14929 		/* If we managed to set the bit we got the semaphore. */
   14930 		swsm = CSR_READ(sc, WMREG_SWSM);
   14931 		if (swsm & SWSM_SWESMBI)
   14932 			break;
   14933 
   14934 		delay(50);
   14935 		timeout--;
   14936 	}
   14937 
   14938 	if (timeout == 0) {
   14939 		aprint_error_dev(sc->sc_dev,
   14940 		    "could not acquire SWSM SWESMBI\n");
   14941 		/* Release semaphores */
   14942 		wm_put_swsm_semaphore(sc);
   14943 		return 1;
   14944 	}
   14945 	return 0;
   14946 }
   14947 
   14948 /*
   14949  * Put hardware semaphore.
   14950  * Same as e1000_put_hw_semaphore_generic()
   14951  */
   14952 static void
   14953 wm_put_swsm_semaphore(struct wm_softc *sc)
   14954 {
   14955 	uint32_t swsm;
   14956 
   14957 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14958 		device_xname(sc->sc_dev), __func__));
   14959 
   14960 	swsm = CSR_READ(sc, WMREG_SWSM);
   14961 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14962 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14963 }
   14964 
   14965 /*
   14966  * Get SW/FW semaphore.
   14967  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14968  */
   14969 static int
   14970 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14971 {
   14972 	uint32_t swfw_sync;
   14973 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14974 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14975 	int timeout;
   14976 
   14977 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14978 		device_xname(sc->sc_dev), __func__));
   14979 
   14980 	if (sc->sc_type == WM_T_80003)
   14981 		timeout = 50;
   14982 	else
   14983 		timeout = 200;
   14984 
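          	/*
          	 * SW_FW_SYNC is itself protected by the SWSM semaphore: take
          	 * SWSM, try to claim our software bit(s), release SWSM, and
          	 * retry every 5ms while firmware or software holds the
          	 * resource.
          	 */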
   14985 	while (timeout) {
   14986 		if (wm_get_swsm_semaphore(sc)) {
   14987 			aprint_error_dev(sc->sc_dev,
   14988 			    "%s: failed to get semaphore\n",
   14989 			    __func__);
   14990 			return 1;
   14991 		}
   14992 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14993 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14994 			swfw_sync |= swmask;
   14995 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14996 			wm_put_swsm_semaphore(sc);
   14997 			return 0;
   14998 		}
   14999 		wm_put_swsm_semaphore(sc);
   15000 		delay(5000);
   15001 		timeout--;
   15002 	}
   15003 	device_printf(sc->sc_dev,
   15004 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15005 	    mask, swfw_sync);
   15006 	return 1;
   15007 }
   15008 
   15009 static void
   15010 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15011 {
   15012 	uint32_t swfw_sync;
   15013 
   15014 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15015 		device_xname(sc->sc_dev), __func__));
   15016 
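          	/* Spin until we own SWSM so that our bit always gets cleared */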
   15017 	while (wm_get_swsm_semaphore(sc) != 0)
   15018 		continue;
   15019 
   15020 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15021 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15022 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15023 
   15024 	wm_put_swsm_semaphore(sc);
   15025 }
   15026 
   15027 static int
   15028 wm_get_nvm_80003(struct wm_softc *sc)
   15029 {
   15030 	int rv;
   15031 
   15032 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15033 		device_xname(sc->sc_dev), __func__));
   15034 
   15035 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15036 		aprint_error_dev(sc->sc_dev,
   15037 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15038 		return rv;
   15039 	}
   15040 
   15041 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15042 	    && (rv = wm_get_eecd(sc)) != 0) {
   15043 		aprint_error_dev(sc->sc_dev,
   15044 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15045 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15046 		return rv;
   15047 	}
   15048 
   15049 	return 0;
   15050 }
   15051 
   15052 static void
   15053 wm_put_nvm_80003(struct wm_softc *sc)
   15054 {
   15055 
   15056 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15057 		device_xname(sc->sc_dev), __func__));
   15058 
   15059 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15060 		wm_put_eecd(sc);
   15061 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15062 }
   15063 
   15064 static int
   15065 wm_get_nvm_82571(struct wm_softc *sc)
   15066 {
   15067 	int rv;
   15068 
   15069 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15070 		device_xname(sc->sc_dev), __func__));
   15071 
   15072 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15073 		return rv;
   15074 
   15075 	switch (sc->sc_type) {
   15076 	case WM_T_82573:
   15077 		break;
   15078 	default:
   15079 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15080 			rv = wm_get_eecd(sc);
   15081 		break;
   15082 	}
   15083 
   15084 	if (rv != 0) {
   15085 		aprint_error_dev(sc->sc_dev,
   15086 		    "%s: failed to get semaphore\n",
   15087 		    __func__);
   15088 		wm_put_swsm_semaphore(sc);
   15089 	}
   15090 
   15091 	return rv;
   15092 }
   15093 
   15094 static void
   15095 wm_put_nvm_82571(struct wm_softc *sc)
   15096 {
   15097 
   15098 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15099 		device_xname(sc->sc_dev), __func__));
   15100 
   15101 	switch (sc->sc_type) {
   15102 	case WM_T_82573:
   15103 		break;
   15104 	default:
   15105 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15106 			wm_put_eecd(sc);
   15107 		break;
   15108 	}
   15109 
   15110 	wm_put_swsm_semaphore(sc);
   15111 }
   15112 
   15113 static int
   15114 wm_get_phy_82575(struct wm_softc *sc)
   15115 {
   15116 
   15117 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15118 		device_xname(sc->sc_dev), __func__));
   15119 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15120 }
   15121 
   15122 static void
   15123 wm_put_phy_82575(struct wm_softc *sc)
   15124 {
   15125 
   15126 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15127 		device_xname(sc->sc_dev), __func__));
   15128 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15129 }
   15130 
   15131 static int
   15132 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15133 {
   15134 	uint32_t ext_ctrl;
   15135 	int timeout = 200;
   15136 
   15137 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15138 		device_xname(sc->sc_dev), __func__));
   15139 
   15140 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15141 	for (timeout = 0; timeout < 200; timeout++) {
   15142 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15143 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15144 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15145 
   15146 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15147 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15148 			return 0;
   15149 		delay(5000);
   15150 	}
   15151 	device_printf(sc->sc_dev,
   15152 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15153 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15154 	return 1;
   15155 }
   15156 
   15157 static void
   15158 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15159 {
   15160 	uint32_t ext_ctrl;
   15161 
   15162 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15163 		device_xname(sc->sc_dev), __func__));
   15164 
   15165 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15166 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15167 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15168 
   15169 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15170 }
   15171 
   15172 static int
   15173 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15174 {
   15175 	uint32_t ext_ctrl;
   15176 	int timeout;
   15177 
   15178 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15179 		device_xname(sc->sc_dev), __func__));
   15180 	mutex_enter(sc->sc_ich_phymtx);
   15181 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15182 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15183 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15184 			break;
   15185 		delay(1000);
   15186 	}
   15187 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15188 		device_printf(sc->sc_dev,
   15189 		    "SW has already locked the resource\n");
   15190 		goto out;
   15191 	}
   15192 
   15193 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15194 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15195 	for (timeout = 0; timeout < 1000; timeout++) {
   15196 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15197 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15198 			break;
   15199 		delay(1000);
   15200 	}
   15201 	if (timeout >= 1000) {
   15202 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15203 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15204 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15205 		goto out;
   15206 	}
   15207 	return 0;
   15208 
   15209 out:
   15210 	mutex_exit(sc->sc_ich_phymtx);
   15211 	return 1;
   15212 }
   15213 
   15214 static void
   15215 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15216 {
   15217 	uint32_t ext_ctrl;
   15218 
   15219 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15220 		device_xname(sc->sc_dev), __func__));
   15221 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15222 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15223 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15224 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15225 	} else
   15226 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15227 
   15228 	mutex_exit(sc->sc_ich_phymtx);
   15229 }
   15230 
   15231 static int
   15232 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15233 {
   15234 
   15235 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15236 		device_xname(sc->sc_dev), __func__));
   15237 	mutex_enter(sc->sc_ich_nvmmtx);
   15238 
   15239 	return 0;
   15240 }
   15241 
   15242 static void
   15243 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15244 {
   15245 
   15246 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15247 		device_xname(sc->sc_dev), __func__));
   15248 	mutex_exit(sc->sc_ich_nvmmtx);
   15249 }
   15250 
   15251 static int
   15252 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15253 {
   15254 	int i = 0;
   15255 	uint32_t reg;
   15256 
   15257 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15258 		device_xname(sc->sc_dev), __func__));
   15259 
   15260 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15261 	do {
   15262 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15263 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15264 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15265 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15266 			break;
   15267 		delay(2*1000);
   15268 		i++;
   15269 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15270 
   15271 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15272 		wm_put_hw_semaphore_82573(sc);
   15273 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15274 		    device_xname(sc->sc_dev));
   15275 		return -1;
   15276 	}
   15277 
   15278 	return 0;
   15279 }
   15280 
   15281 static void
   15282 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15283 {
   15284 	uint32_t reg;
   15285 
   15286 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15287 		device_xname(sc->sc_dev), __func__));
   15288 
   15289 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15290 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15291 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15292 }
   15293 
   15294 /*
   15295  * Management mode and power management related subroutines.
   15296  * BMC, AMT, suspend/resume and EEE.
   15297  */
   15298 
   15299 #ifdef WM_WOL
   15300 static int
   15301 wm_check_mng_mode(struct wm_softc *sc)
   15302 {
   15303 	int rv;
   15304 
   15305 	switch (sc->sc_type) {
   15306 	case WM_T_ICH8:
   15307 	case WM_T_ICH9:
   15308 	case WM_T_ICH10:
   15309 	case WM_T_PCH:
   15310 	case WM_T_PCH2:
   15311 	case WM_T_PCH_LPT:
   15312 	case WM_T_PCH_SPT:
   15313 	case WM_T_PCH_CNP:
   15314 		rv = wm_check_mng_mode_ich8lan(sc);
   15315 		break;
   15316 	case WM_T_82574:
   15317 	case WM_T_82583:
   15318 		rv = wm_check_mng_mode_82574(sc);
   15319 		break;
   15320 	case WM_T_82571:
   15321 	case WM_T_82572:
   15322 	case WM_T_82573:
   15323 	case WM_T_80003:
   15324 		rv = wm_check_mng_mode_generic(sc);
   15325 		break;
   15326 	default:
    15327 		/* Nothing to do */
   15328 		rv = 0;
   15329 		break;
   15330 	}
   15331 
   15332 	return rv;
   15333 }
   15334 
   15335 static int
   15336 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15337 {
   15338 	uint32_t fwsm;
   15339 
   15340 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15341 
   15342 	if (((fwsm & FWSM_FW_VALID) != 0)
   15343 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15344 		return 1;
   15345 
   15346 	return 0;
   15347 }
   15348 
   15349 static int
   15350 wm_check_mng_mode_82574(struct wm_softc *sc)
   15351 {
   15352 	uint16_t data;
   15353 
   15354 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15355 
   15356 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15357 		return 1;
   15358 
   15359 	return 0;
   15360 }
   15361 
   15362 static int
   15363 wm_check_mng_mode_generic(struct wm_softc *sc)
   15364 {
   15365 	uint32_t fwsm;
   15366 
   15367 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15368 
   15369 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15370 		return 1;
   15371 
   15372 	return 0;
   15373 }
   15374 #endif /* WM_WOL */
   15375 
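          /*
           * wm_enable_mng_pass_thru:
           *
           *	Return 1 if management pass-through should be enabled:
           *	firmware must be present, TCO reception must be enabled,
           *	and the device must be in a pass-through management mode.
           */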
   15376 static int
   15377 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15378 {
   15379 	uint32_t manc, fwsm, factps;
   15380 
   15381 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15382 		return 0;
   15383 
   15384 	manc = CSR_READ(sc, WMREG_MANC);
   15385 
   15386 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15387 		device_xname(sc->sc_dev), manc));
   15388 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15389 		return 0;
   15390 
   15391 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15392 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15393 		factps = CSR_READ(sc, WMREG_FACTPS);
   15394 		if (((factps & FACTPS_MNGCG) == 0)
   15395 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15396 			return 1;
    15397 	} else if ((sc->sc_type == WM_T_82574)
          	    || (sc->sc_type == WM_T_82583)) {
   15398 		uint16_t data;
   15399 
   15400 		factps = CSR_READ(sc, WMREG_FACTPS);
   15401 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15402 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15403 			device_xname(sc->sc_dev), factps, data));
   15404 		if (((factps & FACTPS_MNGCG) == 0)
   15405 		    && ((data & NVM_CFG2_MNGM_MASK)
   15406 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15407 			return 1;
   15408 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15409 	    && ((manc & MANC_ASF_EN) == 0))
   15410 		return 1;
   15411 
   15412 	return 0;
   15413 }
   15414 
   15415 static bool
   15416 wm_phy_resetisblocked(struct wm_softc *sc)
   15417 {
   15418 	bool blocked = false;
   15419 	uint32_t reg;
   15420 	int i = 0;
   15421 
   15422 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15423 		device_xname(sc->sc_dev), __func__));
   15424 
   15425 	switch (sc->sc_type) {
   15426 	case WM_T_ICH8:
   15427 	case WM_T_ICH9:
   15428 	case WM_T_ICH10:
   15429 	case WM_T_PCH:
   15430 	case WM_T_PCH2:
   15431 	case WM_T_PCH_LPT:
   15432 	case WM_T_PCH_SPT:
   15433 	case WM_T_PCH_CNP:
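          		/* Poll FWSM_RSPCIPHY for up to ~300ms (30 * 10ms) */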
   15434 		do {
   15435 			reg = CSR_READ(sc, WMREG_FWSM);
   15436 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15437 				blocked = true;
   15438 				delay(10*1000);
   15439 				continue;
   15440 			}
   15441 			blocked = false;
   15442 		} while (blocked && (i++ < 30));
   15443 		return blocked;
   15445 	case WM_T_82571:
   15446 	case WM_T_82572:
   15447 	case WM_T_82573:
   15448 	case WM_T_82574:
   15449 	case WM_T_82583:
   15450 	case WM_T_80003:
   15451 		reg = CSR_READ(sc, WMREG_MANC);
   15452 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15453 			return true;
   15454 		else
   15455 			return false;
   15457 	default:
   15458 		/* No problem */
   15459 		break;
   15460 	}
   15461 
   15462 	return false;
   15463 }
   15464 
   15465 static void
   15466 wm_get_hw_control(struct wm_softc *sc)
   15467 {
   15468 	uint32_t reg;
   15469 
   15470 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15471 		device_xname(sc->sc_dev), __func__));
   15472 
   15473 	if (sc->sc_type == WM_T_82573) {
   15474 		reg = CSR_READ(sc, WMREG_SWSM);
   15475 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15476 	} else if (sc->sc_type >= WM_T_82571) {
   15477 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15478 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15479 	}
   15480 }
   15481 
   15482 static void
   15483 wm_release_hw_control(struct wm_softc *sc)
   15484 {
   15485 	uint32_t reg;
   15486 
   15487 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15488 		device_xname(sc->sc_dev), __func__));
   15489 
   15490 	if (sc->sc_type == WM_T_82573) {
   15491 		reg = CSR_READ(sc, WMREG_SWSM);
   15492 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15493 	} else if (sc->sc_type >= WM_T_82571) {
   15494 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15495 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15496 	}
   15497 }
   15498 
   15499 static void
   15500 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15501 {
   15502 	uint32_t reg;
   15503 
   15504 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15505 		device_xname(sc->sc_dev), __func__));
   15506 
   15507 	if (sc->sc_type < WM_T_PCH2)
   15508 		return;
   15509 
   15510 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15511 
   15512 	if (gate)
   15513 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15514 	else
   15515 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15516 
   15517 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15518 }
   15519 
   15520 static int
   15521 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15522 {
   15523 	uint32_t fwsm, reg;
   15524 	int rv;
   15525 
   15526 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15527 		device_xname(sc->sc_dev), __func__));
   15528 
   15529 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15530 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15531 
   15532 	/* Disable ULP */
   15533 	wm_ulp_disable(sc);
   15534 
   15535 	/* Acquire PHY semaphore */
   15536 	rv = sc->phy.acquire(sc);
   15537 	if (rv != 0) {
   15538 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15539 		device_xname(sc->sc_dev), __func__));
   15540 		return rv;
   15541 	}
   15542 
   15543 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15544 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15545 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15546 	 */
   15547 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15548 	switch (sc->sc_type) {
   15549 	case WM_T_PCH_LPT:
   15550 	case WM_T_PCH_SPT:
   15551 	case WM_T_PCH_CNP:
   15552 		if (wm_phy_is_accessible_pchlan(sc))
   15553 			break;
   15554 
   15555 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15556 		 * forcing MAC to SMBus mode first.
   15557 		 */
   15558 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15559 		reg |= CTRL_EXT_FORCE_SMBUS;
   15560 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15561 #if 0
   15562 		/* XXX Isn't this required??? */
   15563 		CSR_WRITE_FLUSH(sc);
   15564 #endif
   15565 		/* Wait 50 milliseconds for MAC to finish any retries
   15566 		 * that it might be trying to perform from previous
   15567 		 * attempts to acknowledge any phy read requests.
   15568 		 */
   15569 		delay(50 * 1000);
   15570 		/* FALLTHROUGH */
   15571 	case WM_T_PCH2:
   15572 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15573 			break;
   15574 		/* FALLTHROUGH */
   15575 	case WM_T_PCH:
   15576 		if (sc->sc_type == WM_T_PCH)
   15577 			if ((fwsm & FWSM_FW_VALID) != 0)
   15578 				break;
   15579 
   15580 		if (wm_phy_resetisblocked(sc) == true) {
   15581 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   15582 			break;
   15583 		}
   15584 
   15585 		/* Toggle LANPHYPC Value bit */
   15586 		wm_toggle_lanphypc_pch_lpt(sc);
   15587 
   15588 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15589 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15590 				break;
   15591 
    15592 			/* Toggling LANPHYPC brings the PHY out of SMBus mode,
    15593 			 * so ensure that the MAC is also out of SMBus mode.
   15594 			 */
   15595 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15596 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15597 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15598 
   15599 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15600 				break;
   15601 			rv = -1;
   15602 		}
   15603 		break;
   15604 	default:
   15605 		break;
   15606 	}
   15607 
   15608 	/* Release semaphore */
   15609 	sc->phy.release(sc);
   15610 
   15611 	if (rv == 0) {
   15612 		/* Check to see if able to reset PHY.  Print error if not */
   15613 		if (wm_phy_resetisblocked(sc)) {
   15614 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15615 			goto out;
   15616 		}
   15617 
   15618 		/* Reset the PHY before any access to it.  Doing so, ensures
   15619 		 * that the PHY is in a known good state before we read/write
   15620 		 * PHY registers.  The generic reset is sufficient here,
   15621 		 * because we haven't determined the PHY type yet.
   15622 		 */
   15623 		if (wm_reset_phy(sc) != 0)
   15624 			goto out;
   15625 
   15626 		/* On a successful reset, possibly need to wait for the PHY
   15627 		 * to quiesce to an accessible state before returning control
    15628 		 * to the calling function.  If the PHY does not quiesce,
    15629 		 * report it, as that is the condition the PHY is in.
   15631 		 */
   15632 		if (wm_phy_resetisblocked(sc))
   15633 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15634 	}
   15635 
   15636 out:
   15637 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15638 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15639 		delay(10*1000);
   15640 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15641 	}
   15642 
   15643 	return 0;
   15644 }
   15645 
   15646 static void
   15647 wm_init_manageability(struct wm_softc *sc)
   15648 {
   15649 
   15650 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15651 		device_xname(sc->sc_dev), __func__));
   15652 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15653 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15654 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15655 
   15656 		/* Disable hardware interception of ARP */
   15657 		manc &= ~MANC_ARP_EN;
   15658 
   15659 		/* Enable receiving management packets to the host */
   15660 		if (sc->sc_type >= WM_T_82571) {
   15661 			manc |= MANC_EN_MNG2HOST;
   15662 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15663 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15664 		}
   15665 
   15666 		CSR_WRITE(sc, WMREG_MANC, manc);
   15667 	}
   15668 }
   15669 
   15670 static void
   15671 wm_release_manageability(struct wm_softc *sc)
   15672 {
   15673 
   15674 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15675 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15676 
   15677 		manc |= MANC_ARP_EN;
   15678 		if (sc->sc_type >= WM_T_82571)
   15679 			manc &= ~MANC_EN_MNG2HOST;
   15680 
   15681 		CSR_WRITE(sc, WMREG_MANC, manc);
   15682 	}
   15683 }
   15684 
   15685 static void
   15686 wm_get_wakeup(struct wm_softc *sc)
   15687 {
   15688 
   15689 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15690 	switch (sc->sc_type) {
   15691 	case WM_T_82573:
   15692 	case WM_T_82583:
   15693 		sc->sc_flags |= WM_F_HAS_AMT;
   15694 		/* FALLTHROUGH */
   15695 	case WM_T_80003:
   15696 	case WM_T_82575:
   15697 	case WM_T_82576:
   15698 	case WM_T_82580:
   15699 	case WM_T_I350:
   15700 	case WM_T_I354:
   15701 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15702 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15703 		/* FALLTHROUGH */
   15704 	case WM_T_82541:
   15705 	case WM_T_82541_2:
   15706 	case WM_T_82547:
   15707 	case WM_T_82547_2:
   15708 	case WM_T_82571:
   15709 	case WM_T_82572:
   15710 	case WM_T_82574:
   15711 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15712 		break;
   15713 	case WM_T_ICH8:
   15714 	case WM_T_ICH9:
   15715 	case WM_T_ICH10:
   15716 	case WM_T_PCH:
   15717 	case WM_T_PCH2:
   15718 	case WM_T_PCH_LPT:
   15719 	case WM_T_PCH_SPT:
   15720 	case WM_T_PCH_CNP:
   15721 		sc->sc_flags |= WM_F_HAS_AMT;
   15722 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15723 		break;
   15724 	default:
   15725 		break;
   15726 	}
   15727 
   15728 	/* 1: HAS_MANAGE */
   15729 	if (wm_enable_mng_pass_thru(sc) != 0)
   15730 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15731 
   15732 	/*
    15733 	 * Note that the WOL flags are set after the EEPROM/NVM reset
    15734 	 * has been done.
   15735 	 */
   15736 }
   15737 
   15738 /*
   15739  * Unconfigure Ultra Low Power mode.
   15740  * Only for I217 and newer (see below).
   15741  */
   15742 static int
   15743 wm_ulp_disable(struct wm_softc *sc)
   15744 {
   15745 	uint32_t reg;
   15746 	uint16_t phyreg;
   15747 	int i = 0, rv;
   15748 
   15749 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15750 		device_xname(sc->sc_dev), __func__));
   15751 	/* Exclude old devices */
   15752 	if ((sc->sc_type < WM_T_PCH_LPT)
   15753 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15754 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15755 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15756 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15757 		return 0;
   15758 
   15759 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15760 		/* Request ME un-configure ULP mode in the PHY */
   15761 		reg = CSR_READ(sc, WMREG_H2ME);
   15762 		reg &= ~H2ME_ULP;
   15763 		reg |= H2ME_ENFORCE_SETTINGS;
   15764 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15765 
   15766 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15767 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15768 			if (i++ == 30) {
   15769 				device_printf(sc->sc_dev, "%s timed out\n",
   15770 				    __func__);
   15771 				return -1;
   15772 			}
   15773 			delay(10 * 1000);
   15774 		}
   15775 		reg = CSR_READ(sc, WMREG_H2ME);
   15776 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15777 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15778 
   15779 		return 0;
   15780 	}
   15781 
   15782 	/* Acquire semaphore */
   15783 	rv = sc->phy.acquire(sc);
   15784 	if (rv != 0) {
   15785 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15786 		device_xname(sc->sc_dev), __func__));
   15787 		return rv;
   15788 	}
   15789 
   15790 	/* Toggle LANPHYPC */
   15791 	wm_toggle_lanphypc_pch_lpt(sc);
   15792 
   15793 	/* Unforce SMBus mode in PHY */
   15794 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15795 	if (rv != 0) {
   15796 		uint32_t reg2;
   15797 
   15798 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15799 			__func__);
   15800 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15801 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15802 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15803 		delay(50 * 1000);
   15804 
   15805 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15806 		    &phyreg);
   15807 		if (rv != 0)
   15808 			goto release;
   15809 	}
   15810 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15811 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15812 
   15813 	/* Unforce SMBus mode in MAC */
   15814 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15815 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15816 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15817 
   15818 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15819 	if (rv != 0)
   15820 		goto release;
   15821 	phyreg |= HV_PM_CTRL_K1_ENA;
   15822 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15823 
   15824 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15825 		&phyreg);
   15826 	if (rv != 0)
   15827 		goto release;
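          	/*
          	 * Clear all of the ULP configuration bits first, then set
          	 * START in a second write to kick off the un-configuration.
          	 */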
   15828 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15829 	    | I218_ULP_CONFIG1_STICKY_ULP
   15830 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15831 	    | I218_ULP_CONFIG1_WOL_HOST
   15832 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15833 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15834 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15835 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15836 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15837 	phyreg |= I218_ULP_CONFIG1_START;
   15838 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15839 
   15840 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15841 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15842 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15843 
   15844 release:
   15845 	/* Release semaphore */
   15846 	sc->phy.release(sc);
   15847 	wm_gmii_reset(sc);
   15848 	delay(50 * 1000);
   15849 
   15850 	return rv;
   15851 }
   15852 
   15853 /* WOL in the newer chipset interfaces (pchlan) */
   15854 static int
   15855 wm_enable_phy_wakeup(struct wm_softc *sc)
   15856 {
   15857 	device_t dev = sc->sc_dev;
   15858 	uint32_t mreg, moff;
   15859 	uint16_t wuce, wuc, wufc, preg;
   15860 	int i, rv;
   15861 
   15862 	KASSERT(sc->sc_type >= WM_T_PCH);
   15863 
   15864 	/* Copy MAC RARs to PHY RARs */
   15865 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15866 
   15867 	/* Activate PHY wakeup */
   15868 	rv = sc->phy.acquire(sc);
   15869 	if (rv != 0) {
   15870 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15871 		    __func__);
   15872 		return rv;
   15873 	}
   15874 
   15875 	/*
   15876 	 * Enable access to PHY wakeup registers.
   15877 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15878 	 */
   15879 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15880 	if (rv != 0) {
   15881 		device_printf(dev,
   15882 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15883 		goto release;
   15884 	}
   15885 
   15886 	/* Copy MAC MTA to PHY MTA */
   15887 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15888 		uint16_t lo, hi;
   15889 
   15890 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15891 		lo = (uint16_t)(mreg & 0xffff);
   15892 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15893 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15894 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15895 	}
   15896 
   15897 	/* Configure PHY Rx Control register */
   15898 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15899 	mreg = CSR_READ(sc, WMREG_RCTL);
   15900 	if (mreg & RCTL_UPE)
   15901 		preg |= BM_RCTL_UPE;
   15902 	if (mreg & RCTL_MPE)
   15903 		preg |= BM_RCTL_MPE;
   15904 	preg &= ~(BM_RCTL_MO_MASK);
   15905 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15906 	if (moff != 0)
   15907 		preg |= moff << BM_RCTL_MO_SHIFT;
   15908 	if (mreg & RCTL_BAM)
   15909 		preg |= BM_RCTL_BAM;
   15910 	if (mreg & RCTL_PMCF)
   15911 		preg |= BM_RCTL_PMCF;
   15912 	mreg = CSR_READ(sc, WMREG_CTRL);
   15913 	if (mreg & CTRL_RFCE)
   15914 		preg |= BM_RCTL_RFCE;
   15915 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15916 
   15917 	wuc = WUC_APME | WUC_PME_EN;
   15918 	wufc = WUFC_MAG;
   15919 	/* Enable PHY wakeup in MAC register */
   15920 	CSR_WRITE(sc, WMREG_WUC,
   15921 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15922 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15923 
   15924 	/* Configure and enable PHY wakeup in PHY registers */
   15925 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15926 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15927 
   15928 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15929 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15930 
   15931 release:
   15932 	sc->phy.release(sc);
   15933 
    15934 	return rv;
   15935 }
   15936 
   15937 /* Power down workaround on D3 */
   15938 static void
   15939 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15940 {
   15941 	uint32_t reg;
   15942 	uint16_t phyreg;
   15943 	int i;
   15944 
   15945 	for (i = 0; i < 2; i++) {
   15946 		/* Disable link */
   15947 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15948 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15949 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15950 
   15951 		/*
   15952 		 * Call gig speed drop workaround on Gig disable before
   15953 		 * accessing any PHY registers
   15954 		 */
   15955 		if (sc->sc_type == WM_T_ICH8)
   15956 			wm_gig_downshift_workaround_ich8lan(sc);
   15957 
   15958 		/* Write VR power-down enable */
   15959 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15960 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15961 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15962 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15963 
   15964 		/* Read it back and test */
   15965 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15966 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15967 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15968 			break;
   15969 
   15970 		/* Issue PHY reset and repeat at most one more time */
   15971 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15972 	}
   15973 }
   15974 
   15975 /*
   15976  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15977  *  @sc: pointer to the HW structure
   15978  *
   15979  *  During S0 to Sx transition, it is possible the link remains at gig
   15980  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15981  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15982  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15983  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15984  *  needs to be written.
    15985  *  Parts that support (and are linked to a partner which supports) EEE in
   15986  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15987  *  than 10Mbps w/o EEE.
   15988  */
   15989 static void
   15990 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15991 {
   15992 	device_t dev = sc->sc_dev;
   15993 	struct ethercom *ec = &sc->sc_ethercom;
   15994 	uint32_t phy_ctrl;
   15995 	int rv;
   15996 
   15997 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15998 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15999 
   16000 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16001 
   16002 	if (sc->sc_phytype == WMPHY_I217) {
   16003 		uint16_t devid = sc->sc_pcidevid;
   16004 
   16005 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16006 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16007 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16008 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16009 		    (sc->sc_type >= WM_T_PCH_SPT))
   16010 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16011 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16012 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16013 
   16014 		if (sc->phy.acquire(sc) != 0)
   16015 			goto out;
   16016 
   16017 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16018 			uint16_t eee_advert;
   16019 
   16020 			rv = wm_read_emi_reg_locked(dev,
   16021 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16022 			if (rv)
   16023 				goto release;
   16024 
   16025 			/*
   16026 			 * Disable LPLU if both link partners support 100BaseT
   16027 			 * EEE and 100Full is advertised on both ends of the
   16028 			 * link, and enable Auto Enable LPI since there will
   16029 			 * be no driver to enable LPI while in Sx.
   16030 			 */
   16031 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16032 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16033 				uint16_t anar, phy_reg;
   16034 
   16035 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16036 				    &anar);
   16037 				if (anar & ANAR_TX_FD) {
   16038 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16039 					    PHY_CTRL_NOND0A_LPLU);
   16040 
   16041 					/* Set Auto Enable LPI after link up */
   16042 					sc->phy.readreg_locked(dev, 2,
   16043 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16044 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16045 					sc->phy.writereg_locked(dev, 2,
   16046 					    I217_LPI_GPIO_CTRL, phy_reg);
   16047 				}
   16048 			}
   16049 		}
   16050 
   16051 		/*
   16052 		 * For i217 Intel Rapid Start Technology support,
   16053 		 * when the system is going into Sx and no manageability engine
   16054 		 * is present, the driver must configure proxy to reset only on
   16055 		 * power good.	LPI (Low Power Idle) state must also reset only
   16056 		 * on power good, as well as the MTA (Multicast table array).
   16057 		 * The SMBus release must also be disabled on LCD reset.
   16058 		 */
   16059 
   16060 		/*
   16061 		 * Enable MTA to reset for Intel Rapid Start Technology
   16062 		 * Support
   16063 		 */
   16064 
   16065 release:
   16066 		sc->phy.release(sc);
   16067 	}
   16068 out:
   16069 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16070 
   16071 	if (sc->sc_type == WM_T_ICH8)
   16072 		wm_gig_downshift_workaround_ich8lan(sc);
   16073 
   16074 	if (sc->sc_type >= WM_T_PCH) {
   16075 		wm_oem_bits_config_ich8lan(sc, false);
   16076 
   16077 		/* Reset PHY to activate OEM bits on 82577/8 */
   16078 		if (sc->sc_type == WM_T_PCH)
   16079 			wm_reset_phy(sc);
   16080 
   16081 		if (sc->phy.acquire(sc) != 0)
   16082 			return;
   16083 		wm_write_smbus_addr(sc);
   16084 		sc->phy.release(sc);
   16085 	}
   16086 }
   16087 
   16088 /*
   16089  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16090  *  @sc: pointer to the HW structure
   16091  *
   16092  *  During Sx to S0 transitions on non-managed devices or managed devices
   16093  *  on which PHY resets are not blocked, if the PHY registers cannot be
   16094  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   16095  *  the PHY.
   16096  *  On i217, setup Intel Rapid Start Technology.
   16097  */
   16098 static int
   16099 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16100 {
   16101 	device_t dev = sc->sc_dev;
   16102 	int rv;
   16103 
   16104 	if (sc->sc_type < WM_T_PCH2)
   16105 		return 0;
   16106 
   16107 	rv = wm_init_phy_workarounds_pchlan(sc);
   16108 	if (rv != 0)
   16109 		return rv;
   16110 
    16111 	/* For i217 Intel Rapid Start Technology support, when the system
    16112 	 * is transitioning from Sx and no manageability engine is present,
   16113 	 * configure SMBus to restore on reset, disable proxy, and enable
   16114 	 * the reset on MTA (Multicast table array).
   16115 	 */
   16116 	if (sc->sc_phytype == WMPHY_I217) {
   16117 		uint16_t phy_reg;
   16118 
   16119 		rv = sc->phy.acquire(sc);
   16120 		if (rv != 0)
   16121 			return rv;
   16122 
   16123 		/* Clear Auto Enable LPI after link up */
   16124 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16125 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16126 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16127 
   16128 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16129 			/* Restore clear on SMB if no manageability engine
   16130 			 * is present
   16131 			 */
   16132 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16133 			    &phy_reg);
   16134 			if (rv != 0)
   16135 				goto release;
   16136 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16137 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16138 
   16139 			/* Disable Proxy */
   16140 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16141 		}
   16142 		/* Enable reset on MTA */
    16143 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16144 		if (rv != 0)
   16145 			goto release;
   16146 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16147 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16148 
   16149 release:
   16150 		sc->phy.release(sc);
   16151 		return rv;
   16152 	}
   16153 
   16154 	return 0;
   16155 }
   16156 
   16157 static void
   16158 wm_enable_wakeup(struct wm_softc *sc)
   16159 {
   16160 	uint32_t reg, pmreg;
   16161 	pcireg_t pmode;
   16162 	int rv = 0;
   16163 
   16164 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16165 		device_xname(sc->sc_dev), __func__));
   16166 
   16167 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16168 	    &pmreg, NULL) == 0)
   16169 		return;
   16170 
   16171 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16172 		goto pme;
   16173 
   16174 	/* Advertise the wakeup capability */
   16175 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16176 	    | CTRL_SWDPIN(3));
   16177 
   16178 	/* Keep the laser running on fiber adapters */
   16179 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16180 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16181 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16182 		reg |= CTRL_EXT_SWDPIN(3);
   16183 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16184 	}
   16185 
   16186 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16187 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16188 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16189 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16190 		wm_suspend_workarounds_ich8lan(sc);
   16191 
   16192 #if 0	/* For the multicast packet */
   16193 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16194 	reg |= WUFC_MC;
   16195 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16196 #endif
   16197 
   16198 	if (sc->sc_type >= WM_T_PCH) {
   16199 		rv = wm_enable_phy_wakeup(sc);
   16200 		if (rv != 0)
   16201 			goto pme;
   16202 	} else {
   16203 		/* Enable wakeup by the MAC */
   16204 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16205 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16206 	}
   16207 
   16208 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16209 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16210 		|| (sc->sc_type == WM_T_PCH2))
   16211 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16212 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16213 
   16214 pme:
   16215 	/* Request PME */
   16216 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16217 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16218 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16219 		/* For WOL */
   16220 		pmode |= PCI_PMCSR_PME_EN;
   16221 	} else {
   16222 		/* Disable WOL */
   16223 		pmode &= ~PCI_PMCSR_PME_EN;
   16224 	}
   16225 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16226 }
   16227 
   16228 /* Disable ASPM L0s and/or L1 for workaround */
   16229 static void
   16230 wm_disable_aspm(struct wm_softc *sc)
   16231 {
   16232 	pcireg_t reg, mask = 0;
    16233 	const char *str = "";
   16234 
   16235 	/*
    16236 	 * Only for PCIe devices which have the PCIe capability in the PCI
    16237 	 * config space.
   16238 	 */
   16239 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16240 		return;
   16241 
   16242 	switch (sc->sc_type) {
   16243 	case WM_T_82571:
   16244 	case WM_T_82572:
   16245 		/*
   16246 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16247 		 * State Power management L1 State (ASPM L1).
   16248 		 */
   16249 		mask = PCIE_LCSR_ASPM_L1;
   16250 		str = "L1 is";
   16251 		break;
   16252 	case WM_T_82573:
   16253 	case WM_T_82574:
   16254 	case WM_T_82583:
   16255 		/*
   16256 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16257 		 *
    16258 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16259 		 * some chipsets.  The documents for the 82574 and 82583 say
    16260 		 * that disabling L0s only with those specific chipsets is
    16261 		 * sufficient, but we follow what the Intel em driver does.
   16262 		 *
   16263 		 * References:
   16264 		 * Errata 8 of the Specification Update of i82573.
   16265 		 * Errata 20 of the Specification Update of i82574.
   16266 		 * Errata 9 of the Specification Update of i82583.
   16267 		 */
   16268 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16269 		str = "L0s and L1 are";
   16270 		break;
   16271 	default:
   16272 		return;
   16273 	}
   16274 
   16275 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16276 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16277 	reg &= ~mask;
   16278 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16279 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16280 
   16281 	/* Print only in wm_attach() */
   16282 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16283 		aprint_verbose_dev(sc->sc_dev,
   16284 		    "ASPM %s disabled to workaround the errata.\n", str);
   16285 }
   16286 
   16287 /* LPLU */
   16288 
   16289 static void
   16290 wm_lplu_d0_disable(struct wm_softc *sc)
   16291 {
   16292 	struct mii_data *mii = &sc->sc_mii;
   16293 	uint32_t reg;
   16294 	uint16_t phyval;
   16295 
   16296 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16297 		device_xname(sc->sc_dev), __func__));
   16298 
   16299 	if (sc->sc_phytype == WMPHY_IFE)
   16300 		return;
   16301 
   16302 	switch (sc->sc_type) {
   16303 	case WM_T_82571:
   16304 	case WM_T_82572:
   16305 	case WM_T_82573:
   16306 	case WM_T_82575:
   16307 	case WM_T_82576:
   16308 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16309 		phyval &= ~PMR_D0_LPLU;
   16310 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16311 		break;
   16312 	case WM_T_82580:
   16313 	case WM_T_I350:
   16314 	case WM_T_I210:
   16315 	case WM_T_I211:
   16316 		reg = CSR_READ(sc, WMREG_PHPM);
   16317 		reg &= ~PHPM_D0A_LPLU;
   16318 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16319 		break;
   16320 	case WM_T_82574:
   16321 	case WM_T_82583:
   16322 	case WM_T_ICH8:
   16323 	case WM_T_ICH9:
   16324 	case WM_T_ICH10:
   16325 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16326 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16327 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16328 		CSR_WRITE_FLUSH(sc);
   16329 		break;
   16330 	case WM_T_PCH:
   16331 	case WM_T_PCH2:
   16332 	case WM_T_PCH_LPT:
   16333 	case WM_T_PCH_SPT:
   16334 	case WM_T_PCH_CNP:
   16335 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16336 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16337 		if (wm_phy_resetisblocked(sc) == false)
   16338 			phyval |= HV_OEM_BITS_ANEGNOW;
   16339 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16340 		break;
   16341 	default:
   16342 		break;
   16343 	}
   16344 }
   16345 
   16346 /* EEE */
   16347 
   16348 static int
   16349 wm_set_eee_i350(struct wm_softc *sc)
   16350 {
   16351 	struct ethercom *ec = &sc->sc_ethercom;
   16352 	uint32_t ipcnfg, eeer;
   16353 	uint32_t ipcnfg_mask
   16354 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16355 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16356 
   16357 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16358 
   16359 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16360 	eeer = CSR_READ(sc, WMREG_EEER);
   16361 
   16362 	/* Enable or disable per user setting */
   16363 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16364 		ipcnfg |= ipcnfg_mask;
   16365 		eeer |= eeer_mask;
   16366 	} else {
   16367 		ipcnfg &= ~ipcnfg_mask;
   16368 		eeer &= ~eeer_mask;
   16369 	}
   16370 
   16371 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16372 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16373 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16374 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16375 
   16376 	return 0;
   16377 }
   16378 
   16379 static int
   16380 wm_set_eee_pchlan(struct wm_softc *sc)
   16381 {
   16382 	device_t dev = sc->sc_dev;
   16383 	struct ethercom *ec = &sc->sc_ethercom;
   16384 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16385 	int rv;
   16386 
   16387 	switch (sc->sc_phytype) {
   16388 	case WMPHY_82579:
   16389 		lpa = I82579_EEE_LP_ABILITY;
   16390 		pcs_status = I82579_EEE_PCS_STATUS;
   16391 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16392 		break;
   16393 	case WMPHY_I217:
   16394 		lpa = I217_EEE_LP_ABILITY;
   16395 		pcs_status = I217_EEE_PCS_STATUS;
   16396 		adv_addr = I217_EEE_ADVERTISEMENT;
   16397 		break;
   16398 	default:
   16399 		return 0;
   16400 	}
   16401 
   16402 	if (sc->phy.acquire(sc)) {
   16403 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16404 		return 0;
   16405 	}
   16406 
   16407 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16408 	if (rv != 0)
   16409 		goto release;
   16410 
   16411 	/* Clear bits that enable EEE in various speeds */
   16412 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16413 
   16414 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16415 		/* Save off link partner's EEE ability */
   16416 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16417 		if (rv != 0)
   16418 			goto release;
   16419 
   16420 		/* Read EEE advertisement */
   16421 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16422 			goto release;
   16423 
   16424 		/*
   16425 		 * Enable EEE only for speeds in which the link partner is
   16426 		 * EEE capable and for which we advertise EEE.
   16427 		 */
   16428 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16429 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16430 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16431 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16432 			if ((data & ANLPAR_TX_FD) != 0)
   16433 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16434 			else {
   16435 				/*
   16436 				 * EEE is not supported in 100Half, so ignore
   16437 				 * partner's EEE in 100 ability if full-duplex
   16438 				 * is not advertised.
   16439 				 */
   16440 				sc->eee_lp_ability
   16441 				    &= ~AN_EEEADVERT_100_TX;
   16442 			}
   16443 		}
   16444 	}
   16445 
   16446 	if (sc->sc_phytype == WMPHY_82579) {
   16447 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16448 		if (rv != 0)
   16449 			goto release;
   16450 
   16451 		data &= ~I82579_LPI_PLL_SHUT_100;
   16452 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16453 	}
   16454 
   16455 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16456 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16457 		goto release;
   16458 
   16459 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16460 release:
   16461 	sc->phy.release(sc);
   16462 
   16463 	return rv;
   16464 }
   16465 
   16466 static int
   16467 wm_set_eee(struct wm_softc *sc)
   16468 {
   16469 	struct ethercom *ec = &sc->sc_ethercom;
   16470 
   16471 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16472 		return 0;
   16473 
   16474 	if (sc->sc_type == WM_T_I354) {
   16475 		/* I354 uses an external PHY */
   16476 		return 0; /* not yet */
   16477 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16478 		return wm_set_eee_i350(sc);
   16479 	else if (sc->sc_type >= WM_T_PCH2)
   16480 		return wm_set_eee_pchlan(sc);
   16481 
   16482 	return 0;
   16483 }
   16484 
   16485 /*
   16486  * Workarounds (mainly PHY related).
   16487  * Basically, PHY's workarounds are in the PHY drivers.
   16488  */
   16489 
   16490 /* Workaround for 82566 Kumeran PCS lock loss */
   16491 static int
   16492 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16493 {
   16494 	struct mii_data *mii = &sc->sc_mii;
   16495 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16496 	int i, reg, rv;
   16497 	uint16_t phyreg;
   16498 
   16499 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16500 		device_xname(sc->sc_dev), __func__));
   16501 
   16502 	/* If the link is not up, do nothing */
   16503 	if ((status & STATUS_LU) == 0)
   16504 		return 0;
   16505 
   16506 	/* Nothing to do if the link is other than 1Gbps */
   16507 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16508 		return 0;
   16509 
   16510 	for (i = 0; i < 10; i++) {
   16511 		/* read twice */
   16512 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16513 		if (rv != 0)
   16514 			return rv;
   16515 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16516 		if (rv != 0)
   16517 			return rv;
   16518 
   16519 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16520 			goto out;	/* GOOD! */
   16521 
   16522 		/* Reset the PHY */
   16523 		wm_reset_phy(sc);
   16524 		delay(5*1000);
   16525 	}
   16526 
   16527 	/* Disable GigE link negotiation */
   16528 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16529 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16530 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16531 
   16532 	/*
   16533 	 * Call gig speed drop workaround on Gig disable before accessing
   16534 	 * any PHY registers.
   16535 	 */
   16536 	wm_gig_downshift_workaround_ich8lan(sc);
   16537 
   16538 out:
   16539 	return 0;
   16540 }
   16541 
   16542 /*
   16543  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16544  *  @sc: pointer to the HW structure
   16545  *
    16546  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16547  *  LPLU, Gig disable, MDIC PHY reset):
   16548  *    1) Set Kumeran Near-end loopback
   16549  *    2) Clear Kumeran Near-end loopback
   16550  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16551  */
   16552 static void
   16553 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16554 {
   16555 	uint16_t kmreg;
   16556 
   16557 	/* Only for igp3 */
   16558 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16559 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16560 			return;
   16561 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16562 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16563 			return;
   16564 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16565 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16566 	}
   16567 }
   16568 
   16569 /*
   16570  * Workaround for pch's PHYs
   16571  * XXX should be moved to new PHY driver?
   16572  */
   16573 static int
   16574 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16575 {
   16576 	device_t dev = sc->sc_dev;
   16577 	struct mii_data *mii = &sc->sc_mii;
   16578 	struct mii_softc *child;
   16579 	uint16_t phy_data, phyrev = 0;
   16580 	int phytype = sc->sc_phytype;
   16581 	int rv;
   16582 
   16583 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16584 		device_xname(dev), __func__));
   16585 	KASSERT(sc->sc_type == WM_T_PCH);
   16586 
   16587 	/* Set MDIO slow mode before any other MDIO access */
   16588 	if (phytype == WMPHY_82577)
   16589 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16590 			return rv;
   16591 
   16592 	child = LIST_FIRST(&mii->mii_phys);
   16593 	if (child != NULL)
   16594 		phyrev = child->mii_mpd_rev;
   16595 
    16596 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16597 	if ((child != NULL) &&
   16598 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16599 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16600 		/* Disable generation of early preamble (0x4431) */
   16601 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16602 		    &phy_data);
   16603 		if (rv != 0)
   16604 			return rv;
   16605 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16606 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16607 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16608 		    phy_data);
   16609 		if (rv != 0)
   16610 			return rv;
   16611 
   16612 		/* Preamble tuning for SSC */
   16613 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16614 		if (rv != 0)
   16615 			return rv;
   16616 	}
   16617 
   16618 	/* 82578 */
   16619 	if (phytype == WMPHY_82578) {
   16620 		/*
   16621 		 * Return registers to default by doing a soft reset then
   16622 		 * writing 0x3140 to the control register
   16623 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16624 		 */
   16625 		if ((child != NULL) && (phyrev < 2)) {
   16626 			PHY_RESET(child);
   16627 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16628 			if (rv != 0)
   16629 				return rv;
   16630 		}
   16631 	}
   16632 
   16633 	/* Select page 0 */
   16634 	if ((rv = sc->phy.acquire(sc)) != 0)
   16635 		return rv;
   16636 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16637 	sc->phy.release(sc);
   16638 	if (rv != 0)
   16639 		return rv;
   16640 
   16641 	/*
   16642 	 * Configure the K1 Si workaround during phy reset assuming there is
   16643 	 * link so that it disables K1 if link is in 1Gbps.
   16644 	 */
   16645 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16646 		return rv;
   16647 
   16648 	/* Workaround for link disconnects on a busy hub in half duplex */
   16649 	rv = sc->phy.acquire(sc);
   16650 	if (rv)
   16651 		return rv;
   16652 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16653 	if (rv)
   16654 		goto release;
   16655 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16656 	    phy_data & 0x00ff);
   16657 	if (rv)
   16658 		goto release;
   16659 
   16660 	/* Set MSE higher to enable link to stay up when noise is high */
   16661 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16662 release:
   16663 	sc->phy.release(sc);
   16664 
   16665 	return rv;
   16666 }
   16667 
   16668 /*
   16669  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16670  *  @sc:   pointer to the HW structure
   16671  */
   16672 static void
   16673 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16674 {
   16675 
   16676 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16677 		device_xname(sc->sc_dev), __func__));
   16678 
   16679 	if (sc->phy.acquire(sc) != 0)
   16680 		return;
   16681 
   16682 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16683 
   16684 	sc->phy.release(sc);
   16685 }
   16686 
   16687 static void
   16688 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16689 {
   16690 	device_t dev = sc->sc_dev;
   16691 	uint32_t mac_reg;
   16692 	uint16_t i, wuce;
   16693 	int count;
   16694 
   16695 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16696 		device_xname(dev), __func__));
   16697 
   16698 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16699 		return;
   16700 
   16701 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16702 	count = wm_rar_count(sc);
   16703 	for (i = 0; i < count; i++) {
   16704 		uint16_t lo, hi;
   16705 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16706 		lo = (uint16_t)(mac_reg & 0xffff);
   16707 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16708 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16709 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16710 
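          		/*
          		 * Of RAH, only the low word (the last two octets of the
          		 * address) and the Address Valid bit are mirrored to
          		 * the PHY.
          		 */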
   16711 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16712 		lo = (uint16_t)(mac_reg & 0xffff);
   16713 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16714 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16715 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16716 	}
   16717 
   16718 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16719 }
   16720 
   16721 /*
   16722  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16723  *  with 82579 PHY
   16724  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16725  */
   16726 static int
   16727 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16728 {
   16729 	device_t dev = sc->sc_dev;
   16730 	int rar_count;
   16731 	int rv;
   16732 	uint32_t mac_reg;
   16733 	uint16_t dft_ctrl, data;
   16734 	uint16_t i;
   16735 
   16736 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16737 		device_xname(dev), __func__));
   16738 
   16739 	if (sc->sc_type < WM_T_PCH2)
   16740 		return 0;
   16741 
   16742 	/* Acquire PHY semaphore */
   16743 	rv = sc->phy.acquire(sc);
   16744 	if (rv != 0)
   16745 		return rv;
   16746 
   16747 	/* Disable Rx path while enabling/disabling workaround */
   16748 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16749 	if (rv != 0)
   16750 		goto out;
   16751 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16752 	    dft_ctrl | (1 << 14));
   16753 	if (rv != 0)
   16754 		goto out;
   16755 
   16756 	if (enable) {
   16757 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16758 		 * SHRAL/H) and initial CRC values to the MAC
   16759 		 */
   16760 		rar_count = wm_rar_count(sc);
   16761 		for (i = 0; i < rar_count; i++) {
   16762 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16763 			uint32_t addr_high, addr_low;
   16764 
   16765 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16766 			if (!(addr_high & RAL_AV))
   16767 				continue;
   16768 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16769 			mac_addr[0] = (addr_low & 0xFF);
   16770 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16771 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16772 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16773 			mac_addr[4] = (addr_high & 0xFF);
   16774 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16775 
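          			/*
          			 * The initial CRC value is the bitwise
          			 * complement of the little-endian CRC-32 of
          			 * the station address.
          			 */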
   16776 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16777 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16778 		}
   16779 
   16780 		/* Write Rx addresses to the PHY */
   16781 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16782 	}
   16783 
   16784 	/*
   16785 	 * If enable ==
   16786 	 *	true: Enable jumbo frame workaround in the MAC.
   16787 	 *	false: Write MAC register values back to h/w defaults.
   16788 	 */
   16789 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16790 	if (enable) {
   16791 		mac_reg &= ~(1 << 14);
   16792 		mac_reg |= (7 << 15);
   16793 	} else
   16794 		mac_reg &= ~(0xf << 14);
   16795 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16796 
   16797 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16798 	if (enable) {
   16799 		mac_reg |= RCTL_SECRC;
   16800 		sc->sc_rctl |= RCTL_SECRC;
   16801 		sc->sc_flags |= WM_F_CRC_STRIP;
   16802 	} else {
   16803 		mac_reg &= ~RCTL_SECRC;
   16804 		sc->sc_rctl &= ~RCTL_SECRC;
   16805 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16806 	}
   16807 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16808 
   16809 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16810 	if (rv != 0)
   16811 		goto out;
   16812 	if (enable)
   16813 		data |= 1 << 0;
   16814 	else
   16815 		data &= ~(1 << 0);
   16816 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16817 	if (rv != 0)
   16818 		goto out;
   16819 
   16820 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16821 	if (rv != 0)
   16822 		goto out;
   16823 	/*
    16824 	 * XXX FreeBSD and Linux do the same thing: they set the same value
    16825 	 * in both the enable case and the disable case. Is that correct?
   16826 	 */
   16827 	data &= ~(0xf << 8);
   16828 	data |= (0xb << 8);
   16829 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16830 	if (rv != 0)
   16831 		goto out;
   16832 
   16833 	/*
   16834 	 * If enable ==
   16835 	 *	true: Enable jumbo frame workaround in the PHY.
   16836 	 *	false: Write PHY register values back to h/w defaults.
   16837 	 */
   16838 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16839 	if (rv != 0)
   16840 		goto out;
   16841 	data &= ~(0x7F << 5);
   16842 	if (enable)
   16843 		data |= (0x37 << 5);
   16844 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16845 	if (rv != 0)
   16846 		goto out;
   16847 
   16848 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16849 	if (rv != 0)
   16850 		goto out;
   16851 	if (enable)
   16852 		data &= ~(1 << 13);
   16853 	else
   16854 		data |= (1 << 13);
   16855 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16856 	if (rv != 0)
   16857 		goto out;
   16858 
   16859 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16860 	if (rv != 0)
   16861 		goto out;
   16862 	data &= ~(0x3FF << 2);
   16863 	if (enable)
   16864 		data |= (I82579_TX_PTR_GAP << 2);
   16865 	else
   16866 		data |= (0x8 << 2);
   16867 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16868 	if (rv != 0)
   16869 		goto out;
   16870 
   16871 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16872 	    enable ? 0xf100 : 0x7e00);
   16873 	if (rv != 0)
   16874 		goto out;
   16875 
   16876 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16877 	if (rv != 0)
   16878 		goto out;
   16879 	if (enable)
   16880 		data |= 1 << 10;
   16881 	else
   16882 		data &= ~(1 << 10);
   16883 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16884 	if (rv != 0)
   16885 		goto out;
   16886 
   16887 	/* Re-enable Rx path after enabling/disabling workaround */
   16888 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16889 	    dft_ctrl & ~(1 << 14));
   16890 
   16891 out:
   16892 	sc->phy.release(sc);
   16893 
   16894 	return rv;
   16895 }
   16896 
   16897 /*
   16898  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16899  *  done after every PHY reset.
   16900  */
   16901 static int
   16902 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16903 {
   16904 	device_t dev = sc->sc_dev;
   16905 	int rv;
   16906 
   16907 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16908 		device_xname(dev), __func__));
   16909 	KASSERT(sc->sc_type == WM_T_PCH2);
   16910 
   16911 	/* Set MDIO slow mode before any other MDIO access */
   16912 	rv = wm_set_mdio_slow_mode_hv(sc);
   16913 	if (rv != 0)
   16914 		return rv;
   16915 
   16916 	rv = sc->phy.acquire(sc);
   16917 	if (rv != 0)
   16918 		return rv;
   16919 	/* Set MSE higher to enable link to stay up when noise is high */
   16920 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16921 	if (rv != 0)
   16922 		goto release;
    16923 	/* Drop the link after the MSE threshold is reached 5 times */
   16924 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16925 release:
   16926 	sc->phy.release(sc);
   16927 
   16928 	return rv;
   16929 }
   16930 
   16931 /**
   16932  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16933  *  @link: link up bool flag
   16934  *
    16935  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
    16936  *  indications, preventing further DMA write requests.  Work around the
    16937  *  issue by disabling the de-assertion of the clock request in 1Gbps mode.
   16938  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16939  *  speeds in order to avoid Tx hangs.
   16940  **/
   16941 static int
   16942 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16943 {
   16944 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16945 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16946 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16947 	uint16_t phyreg;
   16948 
   16949 	if (link && (speed == STATUS_SPEED_1000)) {
   16950 		sc->phy.acquire(sc);
   16951 		int rv = wm_kmrn_readreg_locked(sc,
   16952 		    KUMCTRLSTA_OFFSET_K1_CONFIG, &phyreg);
   16953 		if (rv != 0)
   16954 			goto release;
   16955 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16956 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16957 		if (rv != 0)
   16958 			goto release;
   16959 		delay(20);
   16960 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16961 
   16962 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16963 		    &phyreg);
   16964 release:
   16965 		sc->phy.release(sc);
   16966 		return rv;
   16967 	}
   16968 
   16969 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16970 
   16971 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16972 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16973 	    || !link
   16974 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16975 		goto update_fextnvm6;
   16976 
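          	/*
          	 * At this point the link is up at 10Mbps, or at 100Mbps
          	 * half-duplex, on an early PHY rev (<= 5): tune the inband
          	 * link-status transmit timeout and the K1 entry latency below.
          	 */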
   16977 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16978 
   16979 	/* Clear link status transmit timeout */
   16980 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16981 	if (speed == STATUS_SPEED_100) {
   16982 		/* Set inband Tx timeout to 5x10us for 100Half */
   16983 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16984 
   16985 		/* Do not extend the K1 entry latency for 100Half */
   16986 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16987 	} else {
   16988 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16989 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16990 
   16991 		/* Extend the K1 entry latency for 10 Mbps */
   16992 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16993 	}
   16994 
   16995 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16996 
   16997 update_fextnvm6:
   16998 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16999 	return 0;
   17000 }
   17001 
   17002 /*
   17003  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17004  *  @sc:   pointer to the HW structure
   17005  *  @link: link up bool flag
   17006  *
   17007  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    17008  *  from a lower speed.  This workaround disables K1 whenever the link is
    17009  *  at 1Gig.  If the link is down, the function will restore the default
    17010  *  K1 setting located in the NVM.
   17011  */
   17012 static int
   17013 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17014 {
   17015 	int k1_enable = sc->sc_nvm_k1_enabled;
   17016 	int rv;
   17017 
   17018 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17019 		device_xname(sc->sc_dev), __func__));
   17020 
   17021 	rv = sc->phy.acquire(sc);
   17022 	if (rv != 0)
   17023 		return rv;
   17024 
   17025 	if (link) {
   17026 		k1_enable = 0;
   17027 
   17028 		/* Link stall fix for link up */
   17029 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17030 		    0x0100);
   17031 	} else {
   17032 		/* Link stall fix for link down */
   17033 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17034 		    0x4100);
   17035 	}
   17036 
   17037 	wm_configure_k1_ich8lan(sc, k1_enable);
   17038 	sc->phy.release(sc);
   17039 
   17040 	return 0;
   17041 }
   17042 
   17043 /*
   17044  *  wm_k1_workaround_lv - K1 Si workaround
   17045  *  @sc:   pointer to the HW structure
   17046  *
   17047  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   17048  *  Disable K1 for 1000 and 100 speeds
   17049  */
   17050 static int
   17051 wm_k1_workaround_lv(struct wm_softc *sc)
   17052 {
   17053 	uint32_t reg;
   17054 	uint16_t phyreg;
   17055 	int rv;
   17056 
   17057 	if (sc->sc_type != WM_T_PCH2)
   17058 		return 0;
   17059 
   17060 	/* Set K1 beacon duration based on 10Mbps speed */
   17061 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17062 	if (rv != 0)
   17063 		return rv;
   17064 
   17065 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17066 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17067 		if (phyreg &
   17068 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17069 			/* LV 1G/100 packet drop issue workaround */
   17070 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17071 			    &phyreg);
   17072 			if (rv != 0)
   17073 				return rv;
   17074 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17075 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17076 			    phyreg);
   17077 			if (rv != 0)
   17078 				return rv;
   17079 		} else {
   17080 			/* For 10Mbps */
   17081 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17082 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17083 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17084 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17085 		}
   17086 	}
   17087 
   17088 	return 0;
   17089 }
   17090 
   17091 /*
   17092  *  wm_link_stall_workaround_hv - Si workaround
   17093  *  @sc: pointer to the HW structure
   17094  *
   17095  *  This function works around a Si bug where the link partner can get
   17096  *  a link up indication before the PHY does. If small packets are sent
   17097  *  by the link partner they can be placed in the packet buffer without
   17098  *  being properly accounted for by the PHY and will stall preventing
   17099  *  further packets from being received.  The workaround is to clear the
   17100  *  packet buffer after the PHY detects link up.
   17101  */
   17102 static int
   17103 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17104 {
   17105 	uint16_t phyreg;
   17106 
   17107 	if (sc->sc_phytype != WMPHY_82578)
   17108 		return 0;
   17109 
    17110 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17111 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17112 	if ((phyreg & BMCR_LOOP) != 0)
   17113 		return 0;
   17114 
   17115 	/* Check if link is up and at 1Gbps */
   17116 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17117 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17118 	    | BM_CS_STATUS_SPEED_MASK;
   17119 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17120 		| BM_CS_STATUS_SPEED_1000))
   17121 		return 0;
   17122 
   17123 	delay(200 * 1000);	/* XXX too big */
   17124 
   17125 	/* Flush the packets in the fifo buffer */
   17126 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17127 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17128 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17129 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17130 
   17131 	return 0;
   17132 }
   17133 
   17134 static int
   17135 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17136 {
   17137 	int rv;
   17138 	uint16_t reg;
   17139 
   17140 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17141 	if (rv != 0)
   17142 		return rv;
   17143 
   17144 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17145 	    reg | HV_KMRN_MDIO_SLOW);
   17146 }
   17147 
   17148 /*
   17149  *  wm_configure_k1_ich8lan - Configure K1 power state
   17150  *  @sc: pointer to the HW structure
   17151  *  @enable: K1 state to configure
   17152  *
   17153  *  Configure the K1 power state based on the provided parameter.
   17154  *  Assumes semaphore already acquired.
   17155  */
   17156 static void
   17157 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17158 {
   17159 	uint32_t ctrl, ctrl_ext, tmp;
   17160 	uint16_t kmreg;
   17161 	int rv;
   17162 
   17163 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17164 
   17165 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17166 	if (rv != 0)
   17167 		return;
   17168 
   17169 	if (k1_enable)
   17170 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17171 	else
   17172 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17173 
   17174 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17175 	if (rv != 0)
   17176 		return;
   17177 
   17178 	delay(20);
   17179 
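          	/*
          	 * Briefly force the MAC speed with the speed-select bypass
          	 * bit set, then restore the original CTRL/CTRL_EXT values;
          	 * this toggle appears to be what lets the new K1 setting take
          	 * effect (other drivers for this hardware do the same dance).
          	 */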
   17180 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17181 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17182 
   17183 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17184 	tmp |= CTRL_FRCSPD;
   17185 
   17186 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17187 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17188 	CSR_WRITE_FLUSH(sc);
   17189 	delay(20);
   17190 
   17191 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17192 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17193 	CSR_WRITE_FLUSH(sc);
   17194 	delay(20);
   17195 
   17196 	return;
   17197 }
   17198 
   17199 /* special case - for 82575 - need to do manual init ... */
   17200 static void
   17201 wm_reset_init_script_82575(struct wm_softc *sc)
   17202 {
   17203 	/*
    17204 	 * Remark: this is untested code - we have no board without EEPROM;
    17205 	 * same setup as mentioned in the FreeBSD driver for the i82575.
   17206 	 */
   17207 
   17208 	/* SerDes configuration via SERDESCTRL */
   17209 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17210 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17211 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17212 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17213 
   17214 	/* CCM configuration via CCMCTL register */
   17215 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17216 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17217 
   17218 	/* PCIe lanes configuration */
   17219 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17220 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17221 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17222 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17223 
   17224 	/* PCIe PLL Configuration */
   17225 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17226 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17227 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17228 }
   17229 
   17230 static void
   17231 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17232 {
   17233 	uint32_t reg;
   17234 	uint16_t nvmword;
   17235 	int rv;
   17236 
   17237 	if (sc->sc_type != WM_T_82580)
   17238 		return;
   17239 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17240 		return;
   17241 
   17242 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17243 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17244 	if (rv != 0) {
   17245 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17246 		    __func__);
   17247 		return;
   17248 	}
   17249 
   17250 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17251 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17252 		reg |= MDICNFG_DEST;
   17253 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17254 		reg |= MDICNFG_COM_MDIO;
   17255 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17256 }
   17257 
   17258 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17259 
   17260 static bool
   17261 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17262 {
   17263 	uint32_t reg;
   17264 	uint16_t id1, id2;
   17265 	int i, rv;
   17266 
   17267 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17268 		device_xname(sc->sc_dev), __func__));
   17269 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17270 
   17271 	id1 = id2 = 0xffff;
   17272 	for (i = 0; i < 2; i++) {
   17273 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17274 		    &id1);
   17275 		if ((rv != 0) || MII_INVALIDID(id1))
   17276 			continue;
   17277 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17278 		    &id2);
   17279 		if ((rv != 0) || MII_INVALIDID(id2))
   17280 			continue;
   17281 		break;
   17282 	}
   17283 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17284 		goto out;
   17285 
   17286 	/*
   17287 	 * In case the PHY needs to be in mdio slow mode,
   17288 	 * set slow mode and try to get the PHY id again.
   17289 	 */
   17290 	rv = 0;
   17291 	if (sc->sc_type < WM_T_PCH_LPT) {
   17292 		sc->phy.release(sc);
   17293 		wm_set_mdio_slow_mode_hv(sc);
   17294 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   17295 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   17296 		sc->phy.acquire(sc);
   17297 	}
   17298 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17299 		device_printf(sc->sc_dev, "XXX return with false\n");
   17300 		return false;
   17301 	}
   17302 out:
   17303 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17304 		/* Only unforce SMBus if ME is not active */
   17305 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17306 			uint16_t phyreg;
   17307 
   17308 			/* Unforce SMBus mode in PHY */
   17309 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17310 			    CV_SMB_CTRL, &phyreg);
   17311 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17312 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17313 			    CV_SMB_CTRL, phyreg);
   17314 
   17315 			/* Unforce SMBus mode in MAC */
   17316 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17317 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17318 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17319 		}
   17320 	}
   17321 	return true;
   17322 }
   17323 
   17324 static void
   17325 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17326 {
   17327 	uint32_t reg;
   17328 	int i;
   17329 
   17330 	/* Set PHY Config Counter to 50msec */
   17331 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17332 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17333 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17334 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17335 
   17336 	/* Toggle LANPHYPC */
   17337 	reg = CSR_READ(sc, WMREG_CTRL);
   17338 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17339 	reg &= ~CTRL_LANPHYPC_VALUE;
   17340 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17341 	CSR_WRITE_FLUSH(sc);
   17342 	delay(1000);
   17343 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17344 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17345 	CSR_WRITE_FLUSH(sc);
   17346 
   17347 	if (sc->sc_type < WM_T_PCH_LPT)
   17348 		delay(50 * 1000);
   17349 	else {
   17350 		i = 20;
   17351 
   17352 		do {
   17353 			delay(5 * 1000);
   17354 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17355 		    && i--);
   17356 
   17357 		delay(30 * 1000);
   17358 	}
   17359 }
   17360 
   17361 static int
   17362 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17363 {
   17364 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17365 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17366 	uint32_t rxa;
   17367 	uint16_t scale = 0, lat_enc = 0;
   17368 	int32_t obff_hwm = 0;
   17369 	int64_t lat_ns, value;
   17370 
   17371 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17372 		device_xname(sc->sc_dev), __func__));
   17373 
   17374 	if (link) {
   17375 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17376 		uint32_t status;
   17377 		uint16_t speed;
   17378 		pcireg_t preg;
   17379 
   17380 		status = CSR_READ(sc, WMREG_STATUS);
   17381 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17382 		case STATUS_SPEED_10:
   17383 			speed = 10;
   17384 			break;
   17385 		case STATUS_SPEED_100:
   17386 			speed = 100;
   17387 			break;
   17388 		case STATUS_SPEED_1000:
   17389 			speed = 1000;
   17390 			break;
   17391 		default:
   17392 			device_printf(sc->sc_dev, "Unknown speed "
   17393 			    "(status = %08x)\n", status);
   17394 			return -1;
   17395 		}
   17396 
   17397 		/* Rx Packet Buffer Allocation size (KB) */
   17398 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17399 
   17400 		/*
   17401 		 * Determine the maximum latency tolerated by the device.
   17402 		 *
   17403 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17404 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17405 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17406 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17407 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17408 		 */
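          		/*
          		 * The computation below is, in effect, the time (in ns)
          		 * needed to drain the Rx buffer, less two maximum-sized
          		 * frames, at the negotiated line rate.  As a worked
          		 * example of the encoding loop that follows
          		 * (illustrative numbers only): lat_ns = 100000 needs
          		 * two scale steps (howmany(100000, 32) = 3125, then
          		 * howmany(3125, 32) = 98), giving scale = 2 and
          		 * value = 98, i.e. a tolerated latency of
          		 * 98 * 2^10 ns = 100352 ns >= lat_ns.
          		 */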
   17409 		lat_ns = ((int64_t)rxa * 1024 -
   17410 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17411 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17412 		if (lat_ns < 0)
   17413 			lat_ns = 0;
   17414 		else
   17415 			lat_ns /= speed;
   17416 		value = lat_ns;
   17417 
   17418 		while (value > LTRV_VALUE) {
    17419 			scale++;
   17420 			value = howmany(value, __BIT(5));
   17421 		}
   17422 		if (scale > LTRV_SCALE_MAX) {
   17423 			device_printf(sc->sc_dev,
   17424 			    "Invalid LTR latency scale %d\n", scale);
   17425 			return -1;
   17426 		}
   17427 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17428 
   17429 		/* Determine the maximum latency tolerated by the platform */
   17430 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17431 		    WM_PCI_LTR_CAP_LPT);
   17432 		max_snoop = preg & 0xffff;
   17433 		max_nosnoop = preg >> 16;
   17434 
   17435 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17436 
   17437 		if (lat_enc > max_ltr_enc) {
   17438 			lat_enc = max_ltr_enc;
   17439 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17440 			    * PCI_LTR_SCALETONS(
   17441 				    __SHIFTOUT(lat_enc,
   17442 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17443 		}
   17444 
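          		/*
          		 * Convert the tolerated latency back into the amount
          		 * of traffic (in KB) arriving at line rate during that
          		 * time; the OBFF high water mark is the Rx buffer
          		 * space left over.
          		 */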
   17445 		if (lat_ns) {
   17446 			lat_ns *= speed * 1000;
   17447 			lat_ns /= 8;
   17448 			lat_ns /= 1000000000;
   17449 			obff_hwm = (int32_t)(rxa - lat_ns);
   17450 		}
   17451 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   17452 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    17453 			    " (rxa = %d, lat_ns = %d)\n",
   17454 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17455 			return -1;
   17456 		}
   17457 	}
    17458 	/* Snoop and No-Snoop latencies are set to the same value */
   17459 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17460 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17461 
   17462 	/* Set OBFF high water mark */
   17463 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17464 	reg |= obff_hwm;
   17465 	CSR_WRITE(sc, WMREG_SVT, reg);
   17466 
   17467 	/* Enable OBFF */
   17468 	reg = CSR_READ(sc, WMREG_SVCR);
   17469 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17470 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17471 
   17472 	return 0;
   17473 }
   17474 
   17475 /*
   17476  * I210 Errata 25 and I211 Errata 10
   17477  * Slow System Clock.
   17478  *
    17479  * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
   17480  */
   17481 static int
   17482 wm_pll_workaround_i210(struct wm_softc *sc)
   17483 {
   17484 	uint32_t mdicnfg, wuc;
   17485 	uint32_t reg;
   17486 	pcireg_t pcireg;
   17487 	uint32_t pmreg;
   17488 	uint16_t nvmword, tmp_nvmword;
   17489 	uint16_t phyval;
   17490 	bool wa_done = false;
   17491 	int i, rv = 0;
   17492 
   17493 	/* Get Power Management cap offset */
   17494 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17495 	    &pmreg, NULL) == 0)
   17496 		return -1;
   17497 
   17498 	/* Save WUC and MDICNFG registers */
   17499 	wuc = CSR_READ(sc, WMREG_WUC);
   17500 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17501 
   17502 	reg = mdicnfg & ~MDICNFG_DEST;
   17503 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17504 
   17505 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17506 		/*
   17507 		 * The default value of the Initialization Control Word 1
   17508 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   17509 		 */
   17510 		nvmword = INVM_DEFAULT_AL;
   17511 	}
   17512 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17513 
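          	/*
          	 * Check whether the PHY PLL came up configured; if not, reset
          	 * the PHY and bounce the device through D3hot with a patched
          	 * autoload word, then test again, up to WM_MAX_PLL_TRIES times.
          	 */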
   17514 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17515 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17516 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17517 
   17518 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17519 			rv = 0;
   17520 			break; /* OK */
   17521 		} else
   17522 			rv = -1;
   17523 
   17524 		wa_done = true;
   17525 		/* Directly reset the internal PHY */
   17526 		reg = CSR_READ(sc, WMREG_CTRL);
   17527 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17528 
   17529 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17530 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17531 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17532 
   17533 		CSR_WRITE(sc, WMREG_WUC, 0);
   17534 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17535 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17536 
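          		/*
          		 * Bounce the function through D3hot and back to D0 so
          		 * that the patched autoload word written to EEARBC
          		 * above takes effect when the PHY comes back up.
          		 */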
   17537 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17538 		    pmreg + PCI_PMCSR);
   17539 		pcireg |= PCI_PMCSR_STATE_D3;
   17540 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17541 		    pmreg + PCI_PMCSR, pcireg);
   17542 		delay(1000);
   17543 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17544 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17545 		    pmreg + PCI_PMCSR, pcireg);
   17546 
   17547 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17548 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17549 
   17550 		/* Restore WUC register */
   17551 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17552 	}
   17553 
   17554 	/* Restore MDICNFG setting */
   17555 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17556 	if (wa_done)
   17557 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17558 	return rv;
   17559 }
   17560 
   17561 static void
   17562 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17563 {
   17564 	uint32_t reg;
   17565 
   17566 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17567 		device_xname(sc->sc_dev), __func__));
   17568 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17569 	    || (sc->sc_type == WM_T_PCH_CNP));
   17570 
   17571 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17572 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17573 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17574 
   17575 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17576 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17577 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17578 }
   17579 
   17580 /* Sysctl functions */
   17581 static int
   17582 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   17583 {
   17584 	struct sysctlnode node = *rnode;
   17585 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17586 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17587 	struct wm_softc *sc = txq->txq_sc;
   17588 	uint32_t reg;
   17589 
   17590 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   17591 	node.sysctl_data = &reg;
   17592 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17593 }
   17594 
   17595 static int
   17596 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   17597 {
   17598 	struct sysctlnode node = *rnode;
   17599 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17600 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17601 	struct wm_softc *sc = txq->txq_sc;
   17602 	uint32_t reg;
   17603 
   17604 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   17605 	node.sysctl_data = &reg;
   17606 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17607 }
   17608 
   17609 #ifdef WM_DEBUG
   17610 static int
   17611 wm_sysctl_debug(SYSCTLFN_ARGS)
   17612 {
   17613 	struct sysctlnode node = *rnode;
   17614 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17615 	uint32_t dflags;
   17616 	int error;
   17617 
   17618 	dflags = sc->sc_debug;
   17619 	node.sysctl_data = &dflags;
   17620 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17621 
   17622 	if (error || newp == NULL)
   17623 		return error;
   17624 
   17625 	sc->sc_debug = dflags;
   17626 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   17627 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   17628 
   17629 	return 0;
   17630 }
   17631 #endif
   17632