/*	$NetBSD: if_wm.c,v 1.761 2022/08/12 10:58:21 riastradh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.761 2022/08/12 10:58:21 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
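
/*
 * Usage example (WM_DEBUG only).  Note the extra parentheses around the
 * printf-style arguments, which DPRINTF() pastes verbatim after "printf":
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */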

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
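
/*
 * With options NET_MPSAFE, the callouts, soft interrupts and per-queue
 * workqueue are established MP-safe: their handlers run without the big
 * kernel lock and must rely on the driver's own locks instead
 * (sc_core_lock and the per-queue txq_lock/rxq_lock below).  Without it,
 * they are serialized by the kernel lock as usual.
 */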

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
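
/*
 * Both ring sizes are powers of two, so advancing an index is a mask
 * rather than a modulo; e.g. with 4096 descriptors, WM_NEXTTX(txq, 4095)
 * wraps to 0.  The Rx ring (WM_NEXTRX()/WM_PREVRX() below) uses the same
 * idiom.
 */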

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;
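
/*
 * The unions above overlay the legacy, extended (82574) and NEWQUEUE
 * (82575+) descriptor layouts on a single allocation; which format is
 * actually live is selected at runtime via txq_descsize/rxq_descsize and
 * the accessor macros in struct wm_txqueue/wm_rxqueue below.
 */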

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
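
/*
 * Map from a function number (sc_funcid, 0-3) to the SWFW semaphore bit
 * protecting that port's PHY; presumably indexed when acquiring the PHY
 * semaphore on multi-port chips.
 */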

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
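
/*
 * Lookup table for wm_rxpbs_adjust_82580(): apparently translates the
 * 82580 RXPBS register's packet-buffer-size field into an effective size
 * (in KB); see that function for the exact encoding.
 */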

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
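
/*
 * Example expansion: WM_Q_EVCNT_DEFINE(txq, txdw) declares both
 * txq_txdw_evcnt_name[] and struct evcnt txq_ev_txdw, and the ATTACH
 * macro then formats the name as e.g. "txq00txdw".  Note that the name
 * buffer is sized by the literal string "qname##XX##evname" (18 bytes
 * including the NUL), so a qname/evname pair plus the two-digit queue
 * number must fit in 17 characters; snprintf() truncates otherwise.
 */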

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
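
/*
 * The wrappers above tolerate a NULL sc_core_lock (the lock is not
 * created in every configuration), and WM_CORE_LOCKED() deliberately
 * evaluates true in that case so that locking assertions still hold.
 */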

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
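
/*
 * The Rx chain uses the classic indirect tail pointer idiom: rxq_tailp
 * always points at the m_next slot where the next fragment belongs, so
 * WM_RXCHAIN_LINK() appends in O(1) without special-casing an empty
 * chain.
 */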

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif
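
/*
 * Note that the load/store-relaxed pair above is not an atomic
 * increment; it only guarantees that readers (e.g. vmstat -e) never
 * observe a torn 64-bit count.  Occasional lost updates are acceptable
 * for statistics, and most of these counters have a single updater
 * anyway (the owning queue, under its lock).
 */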

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
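
/*
 * CSR_WRITE_FLUSH() is the usual PCI posted-write flush: reading any
 * register (STATUS here) forces preceding writes out of the host
 * bridge's posting buffers before the driver proceeds.
 */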

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))
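
/*
 * The flash registers normally live in their own BAR on ICH/PCH
 * chipsets; sc_flashreg_offset is nonzero on variants (e.g. PCH_SPT)
 * where they are instead mapped at an offset inside the main register
 * BAR.
 */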

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
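
/*
 * Descriptor base addresses are programmed into the chip as two 32-bit
 * halves.  The high half is taken only when bus_addr_t is 64-bit; on
 * 32-bit platforms the _HI expressions constant-fold to 0.
 */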

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
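
/*
 * Autoconf glue.  DVF_DETACH_SHUTDOWN advertises that the driver can be
 * detached at shutdown time, so wm_detach() is used to quiesce the
 * hardware on system shutdown.
 */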

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1305 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1306 	  WM_T_82547,		WMP_F_COPPER },
   1307 
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1309 	  "Intel i82547GI 1000BASE-T Ethernet",
   1310 	  WM_T_82547_2,		WMP_F_COPPER },
   1311 
   1312 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1313 	  "Intel PRO/1000 PT (82571EB)",
   1314 	  WM_T_82571,		WMP_F_COPPER },
   1315 
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1317 	  "Intel PRO/1000 PF (82571EB)",
   1318 	  WM_T_82571,		WMP_F_FIBER },
   1319 
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1321 	  "Intel PRO/1000 PB (82571EB)",
   1322 	  WM_T_82571,		WMP_F_SERDES },
   1323 
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1325 	  "Intel PRO/1000 QT (82571EB)",
   1326 	  WM_T_82571,		WMP_F_COPPER },
   1327 
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1329 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1330 	  WM_T_82571,		WMP_F_COPPER },
   1331 
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1333 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1334 	  WM_T_82571,		WMP_F_COPPER },
   1335 
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1337 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1338 	  WM_T_82571,		WMP_F_SERDES },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1341 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1342 	  WM_T_82571,		WMP_F_SERDES },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1345 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1346 	  WM_T_82571,		WMP_F_FIBER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1349 	  "Intel i82572EI 1000baseT Ethernet",
   1350 	  WM_T_82572,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1353 	  "Intel i82572EI 1000baseX Ethernet",
   1354 	  WM_T_82572,		WMP_F_FIBER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1357 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1358 	  WM_T_82572,		WMP_F_SERDES },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1361 	  "Intel i82572EI 1000baseT Ethernet",
   1362 	  WM_T_82572,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1365 	  "Intel i82573E",
   1366 	  WM_T_82573,		WMP_F_COPPER },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1369 	  "Intel i82573E IAMT",
   1370 	  WM_T_82573,		WMP_F_COPPER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1373 	  "Intel i82573L Gigabit Ethernet",
   1374 	  WM_T_82573,		WMP_F_COPPER },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1377 	  "Intel i82574L",
   1378 	  WM_T_82574,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1381 	  "Intel i82574L",
   1382 	  WM_T_82574,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1385 	  "Intel i82583V",
   1386 	  WM_T_82583,		WMP_F_COPPER },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1389 	  "i80003 dual 1000baseT Ethernet",
   1390 	  WM_T_80003,		WMP_F_COPPER },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1393 	  "i80003 dual 1000baseX Ethernet",
   1394 	  WM_T_80003,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1397 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1398 	  WM_T_80003,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1401 	  "Intel i80003 1000baseT Ethernet",
   1402 	  WM_T_80003,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1405 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1406 	  WM_T_80003,		WMP_F_SERDES },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1409 	  "Intel i82801H (M_AMT) LAN Controller",
   1410 	  WM_T_ICH8,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1412 	  "Intel i82801H (AMT) LAN Controller",
   1413 	  WM_T_ICH8,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1415 	  "Intel i82801H LAN Controller",
   1416 	  WM_T_ICH8,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1418 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1419 	  WM_T_ICH8,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1421 	  "Intel i82801H (M) LAN Controller",
   1422 	  WM_T_ICH8,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1424 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1425 	  WM_T_ICH8,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1427 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1428 	  WM_T_ICH8,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1430 	  "82567V-3 LAN Controller",
   1431 	  WM_T_ICH8,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1433 	  "82801I (AMT) LAN Controller",
   1434 	  WM_T_ICH9,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1436 	  "82801I 10/100 LAN Controller",
   1437 	  WM_T_ICH9,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1439 	  "82801I (G) 10/100 LAN Controller",
   1440 	  WM_T_ICH9,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1442 	  "82801I (GT) 10/100 LAN Controller",
   1443 	  WM_T_ICH9,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1445 	  "82801I (C) LAN Controller",
   1446 	  WM_T_ICH9,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1448 	  "82801I mobile LAN Controller",
   1449 	  WM_T_ICH9,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1451 	  "82801I mobile (V) LAN Controller",
   1452 	  WM_T_ICH9,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1454 	  "82801I mobile (AMT) LAN Controller",
   1455 	  WM_T_ICH9,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1457 	  "82567LM-4 LAN Controller",
   1458 	  WM_T_ICH9,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1460 	  "82567LM-2 LAN Controller",
   1461 	  WM_T_ICH10,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1463 	  "82567LF-2 LAN Controller",
   1464 	  WM_T_ICH10,		WMP_F_COPPER },
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1466 	  "82567LM-3 LAN Controller",
   1467 	  WM_T_ICH10,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1469 	  "82567LF-3 LAN Controller",
   1470 	  WM_T_ICH10,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1472 	  "82567V-2 LAN Controller",
   1473 	  WM_T_ICH10,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1475 	  "82567V-3? LAN Controller",
   1476 	  WM_T_ICH10,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1478 	  "HANKSVILLE LAN Controller",
   1479 	  WM_T_ICH10,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1481 	  "PCH LAN (82577LM) Controller",
   1482 	  WM_T_PCH,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1484 	  "PCH LAN (82577LC) Controller",
   1485 	  WM_T_PCH,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1487 	  "PCH LAN (82578DM) Controller",
   1488 	  WM_T_PCH,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1490 	  "PCH LAN (82578DC) Controller",
   1491 	  WM_T_PCH,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1493 	  "PCH2 LAN (82579LM) Controller",
   1494 	  WM_T_PCH2,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1496 	  "PCH2 LAN (82579V) Controller",
   1497 	  WM_T_PCH2,		WMP_F_COPPER },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1499 	  "82575EB dual-1000baseT Ethernet",
   1500 	  WM_T_82575,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1502 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1503 	  WM_T_82575,		WMP_F_SERDES },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1505 	  "82575GB quad-1000baseT Ethernet",
   1506 	  WM_T_82575,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1508 	  "82575GB quad-1000baseT Ethernet (PM)",
   1509 	  WM_T_82575,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1511 	  "82576 1000BaseT Ethernet",
   1512 	  WM_T_82576,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1514 	  "82576 1000BaseX Ethernet",
   1515 	  WM_T_82576,		WMP_F_FIBER },
   1516 
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1518 	  "82576 gigabit Ethernet (SERDES)",
   1519 	  WM_T_82576,		WMP_F_SERDES },
   1520 
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1522 	  "82576 quad-1000BaseT Ethernet",
   1523 	  WM_T_82576,		WMP_F_COPPER },
   1524 
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1526 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1527 	  WM_T_82576,		WMP_F_COPPER },
   1528 
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1530 	  "82576 gigabit Ethernet",
   1531 	  WM_T_82576,		WMP_F_COPPER },
   1532 
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1534 	  "82576 gigabit Ethernet (SERDES)",
   1535 	  WM_T_82576,		WMP_F_SERDES },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1537 	  "82576 quad-gigabit Ethernet (SERDES)",
   1538 	  WM_T_82576,		WMP_F_SERDES },
   1539 
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1541 	  "82580 1000BaseT Ethernet",
   1542 	  WM_T_82580,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1544 	  "82580 1000BaseX Ethernet",
   1545 	  WM_T_82580,		WMP_F_FIBER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1548 	  "82580 1000BaseT Ethernet (SERDES)",
   1549 	  WM_T_82580,		WMP_F_SERDES },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1552 	  "82580 gigabit Ethernet (SGMII)",
   1553 	  WM_T_82580,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1555 	  "82580 dual-1000BaseT Ethernet",
   1556 	  WM_T_82580,		WMP_F_COPPER },
   1557 
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1559 	  "82580 quad-1000BaseX Ethernet",
   1560 	  WM_T_82580,		WMP_F_FIBER },
   1561 
   1562 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1563 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1564 	  WM_T_82580,		WMP_F_COPPER },
   1565 
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1567 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1568 	  WM_T_82580,		WMP_F_SERDES },
   1569 
   1570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1571 	  "DH89XXCC 1000BASE-KX Ethernet",
   1572 	  WM_T_82580,		WMP_F_SERDES },
   1573 
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1575 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1576 	  WM_T_82580,		WMP_F_SERDES },
   1577 
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1579 	  "I350 Gigabit Network Connection",
   1580 	  WM_T_I350,		WMP_F_COPPER },
   1581 
   1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1583 	  "I350 Gigabit Fiber Network Connection",
   1584 	  WM_T_I350,		WMP_F_FIBER },
   1585 
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1587 	  "I350 Gigabit Backplane Connection",
   1588 	  WM_T_I350,		WMP_F_SERDES },
   1589 
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1591 	  "I350 Quad Port Gigabit Ethernet",
   1592 	  WM_T_I350,		WMP_F_SERDES },
   1593 
   1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1595 	  "I350 Gigabit Connection",
   1596 	  WM_T_I350,		WMP_F_COPPER },
   1597 
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1599 	  "I354 Gigabit Ethernet (KX)",
   1600 	  WM_T_I354,		WMP_F_SERDES },
   1601 
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1603 	  "I354 Gigabit Ethernet (SGMII)",
   1604 	  WM_T_I354,		WMP_F_COPPER },
   1605 
   1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1607 	  "I354 Gigabit Ethernet (2.5G)",
   1608 	  WM_T_I354,		WMP_F_COPPER },
   1609 
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1611 	  "I210-T1 Ethernet Server Adapter",
   1612 	  WM_T_I210,		WMP_F_COPPER },
   1613 
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1615 	  "I210 Ethernet (Copper OEM)",
   1616 	  WM_T_I210,		WMP_F_COPPER },
   1617 
   1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1619 	  "I210 Ethernet (Copper IT)",
   1620 	  WM_T_I210,		WMP_F_COPPER },
   1621 
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1623 	  "I210 Ethernet (Copper, FLASH less)",
   1624 	  WM_T_I210,		WMP_F_COPPER },
   1625 
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1627 	  "I210 Gigabit Ethernet (Fiber)",
   1628 	  WM_T_I210,		WMP_F_FIBER },
   1629 
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1631 	  "I210 Gigabit Ethernet (SERDES)",
   1632 	  WM_T_I210,		WMP_F_SERDES },
   1633 
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1635 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1636 	  WM_T_I210,		WMP_F_SERDES },
   1637 
   1638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1639 	  "I210 Gigabit Ethernet (SGMII)",
   1640 	  WM_T_I210,		WMP_F_COPPER },
   1641 
   1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1643 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1644 	  WM_T_I210,		WMP_F_COPPER },
   1645 
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1647 	  "I211 Ethernet (COPPER)",
   1648 	  WM_T_I211,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1650 	  "I217 V Ethernet Connection",
   1651 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1653 	  "I217 LM Ethernet Connection",
   1654 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1656 	  "I218 V Ethernet Connection",
   1657 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1659 	  "I218 V Ethernet Connection",
   1660 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1662 	  "I218 V Ethernet Connection",
   1663 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1665 	  "I218 LM Ethernet Connection",
   1666 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1668 	  "I218 LM Ethernet Connection",
   1669 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1671 	  "I218 LM Ethernet Connection",
   1672 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1674 	  "I219 LM Ethernet Connection",
   1675 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1677 	  "I219 LM (2) Ethernet Connection",
   1678 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1680 	  "I219 LM (3) Ethernet Connection",
   1681 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1683 	  "I219 LM (4) Ethernet Connection",
   1684 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1686 	  "I219 LM (5) Ethernet Connection",
   1687 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1689 	  "I219 LM (6) Ethernet Connection",
   1690 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1692 	  "I219 LM (7) Ethernet Connection",
   1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1695 	  "I219 LM (8) Ethernet Connection",
   1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1698 	  "I219 LM (9) Ethernet Connection",
   1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1701 	  "I219 LM (10) Ethernet Connection",
   1702 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1704 	  "I219 LM (11) Ethernet Connection",
   1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1707 	  "I219 LM (12) Ethernet Connection",
   1708 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1710 	  "I219 LM (13) Ethernet Connection",
   1711 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1713 	  "I219 LM (14) Ethernet Connection",
   1714 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1715 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1716 	  "I219 LM (15) Ethernet Connection",
   1717 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1719 	  "I219 LM (16) Ethernet Connection",
   1720 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1722 	  "I219 LM (17) Ethernet Connection",
   1723 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1725 	  "I219 LM (18) Ethernet Connection",
   1726 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1727 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1728 	  "I219 LM (19) Ethernet Connection",
   1729 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1731 	  "I219 V Ethernet Connection",
   1732 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1734 	  "I219 V (2) Ethernet Connection",
   1735 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1737 	  "I219 V (4) Ethernet Connection",
   1738 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1739 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1740 	  "I219 V (5) Ethernet Connection",
   1741 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1743 	  "I219 V (6) Ethernet Connection",
   1744 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1746 	  "I219 V (7) Ethernet Connection",
   1747 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1749 	  "I219 V (8) Ethernet Connection",
   1750 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1751 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1752 	  "I219 V (9) Ethernet Connection",
   1753 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1755 	  "I219 V (10) Ethernet Connection",
   1756 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1758 	  "I219 V (11) Ethernet Connection",
   1759 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1761 	  "I219 V (12) Ethernet Connection",
   1762 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1763 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1764 	  "I219 V (13) Ethernet Connection",
   1765 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1766 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1767 	  "I219 V (14) Ethernet Connection",
   1768 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1770 	  "I219 V (15) Ethernet Connection",
   1771 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1772 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1773 	  "I219 V (16) Ethernet Connection",
   1774 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1775 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1776 	  "I219 V (17) Ethernet Connection",
   1777 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1779 	  "I219 V (18) Ethernet Connection",
   1780 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1782 	  "I219 V (19) Ethernet Connection",
   1783 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1784 	{ 0,			0,
   1785 	  NULL,
   1786 	  0,			0 },
   1787 };
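         
         /*
          * The all-zero entry above is a sentinel: wm_lookup() below walks
          * the table until it hits an entry whose wmp_name is NULL.
          */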
   1788 
   1789 /*
    1790  * Register read/write functions,
    1791  * other than CSR_{READ|WRITE}().
   1792  */
   1793 
   1794 #if 0 /* Not currently used */
   1795 static inline uint32_t
   1796 wm_io_read(struct wm_softc *sc, int reg)
   1797 {
   1798 
   1799 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1800 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1801 }
   1802 #endif
   1803 
   1804 static inline void
   1805 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1806 {
   1807 
   1808 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1809 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1810 }
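         
         /*
          * Both helpers use the same two-register indirection: the register
          * offset is first written to the I/O BAR at offset 0 (the address
          * window) and the data is then transferred at offset 4 (the data
          * window).  A minimal sketch of a read-modify-write through this
          * window (wm_io_read() is compiled out above because nothing in
          * the driver currently uses it):
          *
          *	wm_io_write(sc, WMREG_CTRL,
          *	    wm_io_read(sc, WMREG_CTRL) | CTRL_RST);
          */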
   1811 
   1812 static inline void
   1813 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1814     uint32_t data)
   1815 {
   1816 	uint32_t regval;
   1817 	int i;
   1818 
   1819 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1820 
   1821 	CSR_WRITE(sc, reg, regval);
   1822 
   1823 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1824 		delay(5);
   1825 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1826 			break;
   1827 	}
   1828 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1829 		aprint_error("%s: WARNING:"
   1830 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1831 		    device_xname(sc->sc_dev), reg);
   1832 	}
   1833 }
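         
         /*
          * The loop above polls the READY bit once every 5us, so the worst
          * case is roughly SCTL_CTL_POLL_TIMEOUT * 5 microseconds of busy
          * waiting; on timeout the function only prints a warning and
          * returns.
          */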
   1834 
   1835 static inline void
   1836 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1837 {
   1838 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1839 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1840 }
   1841 
   1842 /*
   1843  * Descriptor sync/init functions.
   1844  */
   1845 static inline void
   1846 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1847 {
   1848 	struct wm_softc *sc = txq->txq_sc;
   1849 
   1850 	/* If it will wrap around, sync to the end of the ring. */
   1851 	if ((start + num) > WM_NTXDESC(txq)) {
   1852 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1853 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1854 		    (WM_NTXDESC(txq) - start), ops);
   1855 		num -= (WM_NTXDESC(txq) - start);
   1856 		start = 0;
   1857 	}
   1858 
   1859 	/* Now sync whatever is left. */
   1860 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1861 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1862 }
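         
         /*
          * For example, with a 256-descriptor ring, a call with start == 250
          * and num == 10 syncs descriptors 250-255 in the first
          * bus_dmamap_sync() call and then wraps to sync descriptors 0-3 in
          * the second.
          */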
   1863 
   1864 static inline void
   1865 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1866 {
   1867 	struct wm_softc *sc = rxq->rxq_sc;
   1868 
   1869 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1870 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1871 }
   1872 
   1873 static inline void
   1874 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1875 {
   1876 	struct wm_softc *sc = rxq->rxq_sc;
   1877 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1878 	struct mbuf *m = rxs->rxs_mbuf;
   1879 
   1880 	/*
   1881 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1882 	 * so that the payload after the Ethernet header is aligned
   1883 	 * to a 4-byte boundary.
    1884 	 *
   1885 	 * XXX BRAINDAMAGE ALERT!
   1886 	 * The stupid chip uses the same size for every buffer, which
   1887 	 * is set in the Receive Control register.  We are using the 2K
   1888 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1889 	 * reason, we can't "scoot" packets longer than the standard
   1890 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1891 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1892 	 * the upper layer copy the headers.
   1893 	 */
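         	/*
         	 * Worked example: with the 2K buffer size option, scooting by
         	 * sc_align_tweak == 2 leaves 2046 usable bytes, still enough
         	 * for a standard 1518-byte Ethernet frame.  Only when the
         	 * largest expected packet would not fit in (2K - 2) bytes do
         	 * we fall back to an align_tweak of 0 on strict-alignment
         	 * platforms.
         	 */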
   1894 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1895 
   1896 	if (sc->sc_type == WM_T_82574) {
   1897 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1898 		rxd->erx_data.erxd_addr =
   1899 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1900 		rxd->erx_data.erxd_dd = 0;
   1901 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1902 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1903 
   1904 		rxd->nqrx_data.nrxd_paddr =
   1905 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1906 		/* Currently, split header is not supported. */
   1907 		rxd->nqrx_data.nrxd_haddr = 0;
   1908 	} else {
   1909 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1910 
   1911 		wm_set_dma_addr(&rxd->wrx_addr,
   1912 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1913 		rxd->wrx_len = 0;
   1914 		rxd->wrx_cksum = 0;
   1915 		rxd->wrx_status = 0;
   1916 		rxd->wrx_errors = 0;
   1917 		rxd->wrx_special = 0;
   1918 	}
   1919 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1920 
   1921 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1922 }
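         
         /*
          * Note that the final CSR_WRITE() of the ring index to RDT is what
          * hands the descriptor back to the hardware; the wm_cdrxsync()
          * before it ensures the device observes a fully written descriptor.
          */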
   1923 
   1924 /*
   1925  * Device driver interface functions and commonly used functions.
   1926  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1927  */
   1928 
    1929 /* Look up a device in the supported device table */
   1930 static const struct wm_product *
   1931 wm_lookup(const struct pci_attach_args *pa)
   1932 {
   1933 	const struct wm_product *wmp;
   1934 
   1935 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1936 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1937 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1938 			return wmp;
   1939 	}
   1940 	return NULL;
   1941 }
   1942 
   1943 /* The match function (ca_match) */
   1944 static int
   1945 wm_match(device_t parent, cfdata_t cf, void *aux)
   1946 {
   1947 	struct pci_attach_args *pa = aux;
   1948 
   1949 	if (wm_lookup(pa) != NULL)
   1950 		return 1;
   1951 
   1952 	return 0;
   1953 }
   1954 
   1955 /* The attach function (ca_attach) */
   1956 static void
   1957 wm_attach(device_t parent, device_t self, void *aux)
   1958 {
   1959 	struct wm_softc *sc = device_private(self);
   1960 	struct pci_attach_args *pa = aux;
   1961 	prop_dictionary_t dict;
   1962 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1963 	pci_chipset_tag_t pc = pa->pa_pc;
   1964 	int counts[PCI_INTR_TYPE_SIZE];
   1965 	pci_intr_type_t max_type;
   1966 	const char *eetype, *xname;
   1967 	bus_space_tag_t memt;
   1968 	bus_space_handle_t memh;
   1969 	bus_size_t memsize;
   1970 	int memh_valid;
   1971 	int i, error;
   1972 	const struct wm_product *wmp;
   1973 	prop_data_t ea;
   1974 	prop_number_t pn;
   1975 	uint8_t enaddr[ETHER_ADDR_LEN];
   1976 	char buf[256];
   1977 	char wqname[MAXCOMLEN];
   1978 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1979 	pcireg_t preg, memtype;
   1980 	uint16_t eeprom_data, apme_mask;
   1981 	bool force_clear_smbi;
   1982 	uint32_t link_mode;
   1983 	uint32_t reg;
   1984 
   1985 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1986 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1987 #endif
   1988 	sc->sc_dev = self;
   1989 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1990 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1991 	sc->sc_core_stopping = false;
   1992 
   1993 	wmp = wm_lookup(pa);
   1994 #ifdef DIAGNOSTIC
   1995 	if (wmp == NULL) {
   1996 		printf("\n");
   1997 		panic("wm_attach: impossible");
   1998 	}
   1999 #endif
   2000 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   2001 
   2002 	sc->sc_pc = pa->pa_pc;
   2003 	sc->sc_pcitag = pa->pa_tag;
   2004 
   2005 	if (pci_dma64_available(pa)) {
   2006 		aprint_verbose(", 64-bit DMA");
   2007 		sc->sc_dmat = pa->pa_dmat64;
   2008 	} else {
   2009 		aprint_verbose(", 32-bit DMA");
   2010 		sc->sc_dmat = pa->pa_dmat;
   2011 	}
   2012 
   2013 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2014 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2015 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2016 
   2017 	sc->sc_type = wmp->wmp_type;
   2018 
   2019 	/* Set default function pointers */
   2020 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2021 	sc->phy.release = sc->nvm.release = wm_put_null;
   2022 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2023 
   2024 	if (sc->sc_type < WM_T_82543) {
   2025 		if (sc->sc_rev < 2) {
   2026 			aprint_error_dev(sc->sc_dev,
   2027 			    "i82542 must be at least rev. 2\n");
   2028 			return;
   2029 		}
   2030 		if (sc->sc_rev < 3)
   2031 			sc->sc_type = WM_T_82542_2_0;
   2032 	}
   2033 
   2034 	/*
   2035 	 * Disable MSI for Errata:
   2036 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2037 	 *
   2038 	 *  82544: Errata 25
   2039 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2040 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2041 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2042 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2043 	 *
   2044 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2045 	 *
   2046 	 *  82571 & 82572: Errata 63
   2047 	 */
   2048 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2049 	    || (sc->sc_type == WM_T_82572))
   2050 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2051 
   2052 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2053 	    || (sc->sc_type == WM_T_82580)
   2054 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2055 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2056 		sc->sc_flags |= WM_F_NEWQUEUE;
   2057 
   2058 	/* Set device properties (mactype) */
   2059 	dict = device_properties(sc->sc_dev);
   2060 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2061 
   2062 	/*
    2063 	 * Map the device.  All devices support memory-mapped access,
   2064 	 * and it is really required for normal operation.
   2065 	 */
   2066 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2067 	switch (memtype) {
   2068 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2069 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2070 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2071 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2072 		break;
   2073 	default:
   2074 		memh_valid = 0;
   2075 		break;
   2076 	}
   2077 
   2078 	if (memh_valid) {
   2079 		sc->sc_st = memt;
   2080 		sc->sc_sh = memh;
   2081 		sc->sc_ss = memsize;
   2082 	} else {
   2083 		aprint_error_dev(sc->sc_dev,
   2084 		    "unable to map device registers\n");
   2085 		return;
   2086 	}
   2087 
   2088 	/*
   2089 	 * In addition, i82544 and later support I/O mapped indirect
   2090 	 * register access.  It is not desirable (nor supported in
   2091 	 * this driver) to use it for normal operation, though it is
   2092 	 * required to work around bugs in some chip versions.
   2093 	 */
   2094 	switch (sc->sc_type) {
   2095 	case WM_T_82544:
   2096 	case WM_T_82541:
   2097 	case WM_T_82541_2:
   2098 	case WM_T_82547:
   2099 	case WM_T_82547_2:
   2100 		/* First we have to find the I/O BAR. */
   2101 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2102 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2103 			if (memtype == PCI_MAPREG_TYPE_IO)
   2104 				break;
   2105 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2106 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2107 				i += 4;	/* skip high bits, too */
   2108 		}
   2109 		if (i < PCI_MAPREG_END) {
   2110 			/*
    2111 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2112 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2113 			 * That's no problem, because newer chips don't have
    2114 			 * this bug.
    2115 			 *
    2116 			 * The i8254x apparently doesn't respond when the
    2117 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2118 			 * been configured.
   2119 			 */
   2120 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2121 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2122 				aprint_error_dev(sc->sc_dev,
   2123 				    "WARNING: I/O BAR at zero.\n");
   2124 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2125 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2126 			    == 0) {
   2127 				sc->sc_flags |= WM_F_IOH_VALID;
   2128 			} else
   2129 				aprint_error_dev(sc->sc_dev,
   2130 				    "WARNING: unable to map I/O space\n");
   2131 		}
   2132 		break;
   2133 	default:
   2134 		break;
   2135 	}
   2136 
   2137 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2138 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2139 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2140 	if (sc->sc_type < WM_T_82542_2_1)
   2141 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2142 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2143 
   2144 	/* Power up chip */
   2145 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2146 	    && error != EOPNOTSUPP) {
   2147 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2148 		return;
   2149 	}
   2150 
   2151 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2152 	/*
    2153 	 *  Don't use MSI-X if we can use only one queue, to save
    2154 	 * interrupt resources.
   2155 	 */
   2156 	if (sc->sc_nqueues > 1) {
   2157 		max_type = PCI_INTR_TYPE_MSIX;
   2158 		/*
    2159 		 *  The 82583 has an MSI-X capability in its PCI configuration
    2160 		 * space but doesn't support it. At least the documentation
    2161 		 * doesn't say anything about MSI-X.
   2162 		 */
   2163 		counts[PCI_INTR_TYPE_MSIX]
   2164 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2165 	} else {
   2166 		max_type = PCI_INTR_TYPE_MSI;
   2167 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2168 	}
   2169 
   2170 	/* Allocation settings */
   2171 	counts[PCI_INTR_TYPE_MSI] = 1;
   2172 	counts[PCI_INTR_TYPE_INTX] = 1;
   2173 	/* overridden by disable flags */
   2174 	if (wm_disable_msi != 0) {
   2175 		counts[PCI_INTR_TYPE_MSI] = 0;
   2176 		if (wm_disable_msix != 0) {
   2177 			max_type = PCI_INTR_TYPE_INTX;
   2178 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2179 		}
   2180 	} else if (wm_disable_msix != 0) {
   2181 		max_type = PCI_INTR_TYPE_MSI;
   2182 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2183 	}
   2184 
   2185 alloc_retry:
   2186 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2187 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2188 		return;
   2189 	}
   2190 
   2191 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2192 		error = wm_setup_msix(sc);
   2193 		if (error) {
   2194 			pci_intr_release(pc, sc->sc_intrs,
   2195 			    counts[PCI_INTR_TYPE_MSIX]);
   2196 
   2197 			/* Setup for MSI: Disable MSI-X */
   2198 			max_type = PCI_INTR_TYPE_MSI;
   2199 			counts[PCI_INTR_TYPE_MSI] = 1;
   2200 			counts[PCI_INTR_TYPE_INTX] = 1;
   2201 			goto alloc_retry;
   2202 		}
   2203 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2204 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2205 		error = wm_setup_legacy(sc);
   2206 		if (error) {
   2207 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2208 			    counts[PCI_INTR_TYPE_MSI]);
   2209 
   2210 			/* The next try is for INTx: Disable MSI */
   2211 			max_type = PCI_INTR_TYPE_INTX;
   2212 			counts[PCI_INTR_TYPE_INTX] = 1;
   2213 			goto alloc_retry;
   2214 		}
   2215 	} else {
   2216 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2217 		error = wm_setup_legacy(sc);
   2218 		if (error) {
   2219 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2220 			    counts[PCI_INTR_TYPE_INTX]);
   2221 			return;
   2222 		}
   2223 	}
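         
         	/*
         	 * To summarize the retry logic above: we start from the largest
         	 * interrupt type permitted by max_type, and each setup failure
         	 * releases the vectors and jumps back to alloc_retry with the
         	 * next smaller type, so the effective order is
         	 * MSI-X -> MSI -> INTx.
         	 */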
   2224 
   2225 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2226 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2227 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2228 	    WM_WORKQUEUE_FLAGS);
   2229 	if (error) {
   2230 		aprint_error_dev(sc->sc_dev,
   2231 		    "unable to create TxRx workqueue\n");
   2232 		goto out;
   2233 	}
   2234 
   2235 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2236 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2237 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2238 	    WQ_MPSAFE);
   2239 	if (error) {
   2240 		workqueue_destroy(sc->sc_queue_wq);
   2241 		aprint_error_dev(sc->sc_dev,
   2242 		    "unable to create reset workqueue\n");
   2243 		goto out;
   2244 	}
   2245 
   2246 	/*
   2247 	 * Check the function ID (unit number of the chip).
   2248 	 */
   2249 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2250 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2251 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2252 	    || (sc->sc_type == WM_T_82580)
   2253 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2254 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2255 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2256 	else
   2257 		sc->sc_funcid = 0;
   2258 
   2259 	/*
   2260 	 * Determine a few things about the bus we're connected to.
   2261 	 */
   2262 	if (sc->sc_type < WM_T_82543) {
   2263 		/* We don't really know the bus characteristics here. */
   2264 		sc->sc_bus_speed = 33;
   2265 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2266 		/*
   2267 		 * CSA (Communication Streaming Architecture) is about as fast
    2268 		 * as a 32-bit 66MHz PCI bus.
   2269 		 */
   2270 		sc->sc_flags |= WM_F_CSA;
   2271 		sc->sc_bus_speed = 66;
   2272 		aprint_verbose_dev(sc->sc_dev,
   2273 		    "Communication Streaming Architecture\n");
   2274 		if (sc->sc_type == WM_T_82547) {
   2275 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2276 			callout_setfunc(&sc->sc_txfifo_ch,
   2277 			    wm_82547_txfifo_stall, sc);
   2278 			aprint_verbose_dev(sc->sc_dev,
   2279 			    "using 82547 Tx FIFO stall work-around\n");
   2280 		}
   2281 	} else if (sc->sc_type >= WM_T_82571) {
   2282 		sc->sc_flags |= WM_F_PCIE;
   2283 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2284 		    && (sc->sc_type != WM_T_ICH10)
   2285 		    && (sc->sc_type != WM_T_PCH)
   2286 		    && (sc->sc_type != WM_T_PCH2)
   2287 		    && (sc->sc_type != WM_T_PCH_LPT)
   2288 		    && (sc->sc_type != WM_T_PCH_SPT)
   2289 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2290 			/* ICH* and PCH* have no PCIe capability registers */
   2291 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2292 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2293 				NULL) == 0)
   2294 				aprint_error_dev(sc->sc_dev,
   2295 				    "unable to find PCIe capability\n");
   2296 		}
   2297 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2298 	} else {
   2299 		reg = CSR_READ(sc, WMREG_STATUS);
   2300 		if (reg & STATUS_BUS64)
   2301 			sc->sc_flags |= WM_F_BUS64;
   2302 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2303 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2304 
   2305 			sc->sc_flags |= WM_F_PCIX;
   2306 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2307 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2308 				aprint_error_dev(sc->sc_dev,
   2309 				    "unable to find PCIX capability\n");
   2310 			else if (sc->sc_type != WM_T_82545_3 &&
   2311 				 sc->sc_type != WM_T_82546_3) {
   2312 				/*
   2313 				 * Work around a problem caused by the BIOS
   2314 				 * setting the max memory read byte count
   2315 				 * incorrectly.
   2316 				 */
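         				/*
         				 * The MMRBC fields encode 512 << n
         				 * bytes, so for example bytecnt == 3
         				 * (4096 bytes) with maxb == 1 (1024
         				 * bytes) is clamped to 1024 below.
         				 */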
   2317 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2318 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2319 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2320 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2321 
   2322 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2323 				    PCIX_CMD_BYTECNT_SHIFT;
   2324 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2325 				    PCIX_STATUS_MAXB_SHIFT;
   2326 				if (bytecnt > maxb) {
   2327 					aprint_verbose_dev(sc->sc_dev,
   2328 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2329 					    512 << bytecnt, 512 << maxb);
   2330 					pcix_cmd = (pcix_cmd &
   2331 					    ~PCIX_CMD_BYTECNT_MASK) |
   2332 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2333 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2334 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2335 					    pcix_cmd);
   2336 				}
   2337 			}
   2338 		}
   2339 		/*
   2340 		 * The quad port adapter is special; it has a PCIX-PCIX
   2341 		 * bridge on the board, and can run the secondary bus at
   2342 		 * a higher speed.
   2343 		 */
   2344 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2345 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2346 								      : 66;
   2347 		} else if (sc->sc_flags & WM_F_PCIX) {
   2348 			switch (reg & STATUS_PCIXSPD_MASK) {
   2349 			case STATUS_PCIXSPD_50_66:
   2350 				sc->sc_bus_speed = 66;
   2351 				break;
   2352 			case STATUS_PCIXSPD_66_100:
   2353 				sc->sc_bus_speed = 100;
   2354 				break;
   2355 			case STATUS_PCIXSPD_100_133:
   2356 				sc->sc_bus_speed = 133;
   2357 				break;
   2358 			default:
   2359 				aprint_error_dev(sc->sc_dev,
   2360 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2361 				    reg & STATUS_PCIXSPD_MASK);
   2362 				sc->sc_bus_speed = 66;
   2363 				break;
   2364 			}
   2365 		} else
   2366 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2367 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2368 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2369 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2370 	}
   2371 
    2372 	/* Clear interesting stat counters */
   2373 	CSR_READ(sc, WMREG_COLC);
   2374 	CSR_READ(sc, WMREG_RXERRC);
   2375 
   2376 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2377 	    || (sc->sc_type >= WM_T_ICH8))
   2378 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2379 	if (sc->sc_type >= WM_T_ICH8)
   2380 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2381 
   2382 	/* Set PHY, NVM mutex related stuff */
   2383 	switch (sc->sc_type) {
   2384 	case WM_T_82542_2_0:
   2385 	case WM_T_82542_2_1:
   2386 	case WM_T_82543:
   2387 	case WM_T_82544:
   2388 		/* Microwire */
   2389 		sc->nvm.read = wm_nvm_read_uwire;
   2390 		sc->sc_nvm_wordsize = 64;
   2391 		sc->sc_nvm_addrbits = 6;
   2392 		break;
   2393 	case WM_T_82540:
   2394 	case WM_T_82545:
   2395 	case WM_T_82545_3:
   2396 	case WM_T_82546:
   2397 	case WM_T_82546_3:
   2398 		/* Microwire */
   2399 		sc->nvm.read = wm_nvm_read_uwire;
   2400 		reg = CSR_READ(sc, WMREG_EECD);
   2401 		if (reg & EECD_EE_SIZE) {
   2402 			sc->sc_nvm_wordsize = 256;
   2403 			sc->sc_nvm_addrbits = 8;
   2404 		} else {
   2405 			sc->sc_nvm_wordsize = 64;
   2406 			sc->sc_nvm_addrbits = 6;
   2407 		}
   2408 		sc->sc_flags |= WM_F_LOCK_EECD;
   2409 		sc->nvm.acquire = wm_get_eecd;
   2410 		sc->nvm.release = wm_put_eecd;
   2411 		break;
   2412 	case WM_T_82541:
   2413 	case WM_T_82541_2:
   2414 	case WM_T_82547:
   2415 	case WM_T_82547_2:
   2416 		reg = CSR_READ(sc, WMREG_EECD);
   2417 		/*
    2418 		 * wm_nvm_set_addrbits_size_eecd() itself accesses the SPI only
    2419 		 * on 8254[17], so set the flags and functions before calling it.
   2420 		 */
   2421 		sc->sc_flags |= WM_F_LOCK_EECD;
   2422 		sc->nvm.acquire = wm_get_eecd;
   2423 		sc->nvm.release = wm_put_eecd;
   2424 		if (reg & EECD_EE_TYPE) {
   2425 			/* SPI */
   2426 			sc->nvm.read = wm_nvm_read_spi;
   2427 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2428 			wm_nvm_set_addrbits_size_eecd(sc);
   2429 		} else {
   2430 			/* Microwire */
   2431 			sc->nvm.read = wm_nvm_read_uwire;
   2432 			if ((reg & EECD_EE_ABITS) != 0) {
   2433 				sc->sc_nvm_wordsize = 256;
   2434 				sc->sc_nvm_addrbits = 8;
   2435 			} else {
   2436 				sc->sc_nvm_wordsize = 64;
   2437 				sc->sc_nvm_addrbits = 6;
   2438 			}
   2439 		}
   2440 		break;
   2441 	case WM_T_82571:
   2442 	case WM_T_82572:
   2443 		/* SPI */
   2444 		sc->nvm.read = wm_nvm_read_eerd;
    2445 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2446 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2447 		wm_nvm_set_addrbits_size_eecd(sc);
   2448 		sc->phy.acquire = wm_get_swsm_semaphore;
   2449 		sc->phy.release = wm_put_swsm_semaphore;
   2450 		sc->nvm.acquire = wm_get_nvm_82571;
   2451 		sc->nvm.release = wm_put_nvm_82571;
   2452 		break;
   2453 	case WM_T_82573:
   2454 	case WM_T_82574:
   2455 	case WM_T_82583:
   2456 		sc->nvm.read = wm_nvm_read_eerd;
    2457 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2458 		if (sc->sc_type == WM_T_82573) {
   2459 			sc->phy.acquire = wm_get_swsm_semaphore;
   2460 			sc->phy.release = wm_put_swsm_semaphore;
   2461 			sc->nvm.acquire = wm_get_nvm_82571;
   2462 			sc->nvm.release = wm_put_nvm_82571;
   2463 		} else {
   2464 			/* Both PHY and NVM use the same semaphore. */
   2465 			sc->phy.acquire = sc->nvm.acquire
   2466 			    = wm_get_swfwhw_semaphore;
   2467 			sc->phy.release = sc->nvm.release
   2468 			    = wm_put_swfwhw_semaphore;
   2469 		}
   2470 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2471 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2472 			sc->sc_nvm_wordsize = 2048;
   2473 		} else {
   2474 			/* SPI */
   2475 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2476 			wm_nvm_set_addrbits_size_eecd(sc);
   2477 		}
   2478 		break;
   2479 	case WM_T_82575:
   2480 	case WM_T_82576:
   2481 	case WM_T_82580:
   2482 	case WM_T_I350:
   2483 	case WM_T_I354:
   2484 	case WM_T_80003:
   2485 		/* SPI */
   2486 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2487 		wm_nvm_set_addrbits_size_eecd(sc);
   2488 		if ((sc->sc_type == WM_T_80003)
   2489 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2490 			sc->nvm.read = wm_nvm_read_eerd;
   2491 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2492 		} else {
   2493 			sc->nvm.read = wm_nvm_read_spi;
   2494 			sc->sc_flags |= WM_F_LOCK_EECD;
   2495 		}
   2496 		sc->phy.acquire = wm_get_phy_82575;
   2497 		sc->phy.release = wm_put_phy_82575;
   2498 		sc->nvm.acquire = wm_get_nvm_80003;
   2499 		sc->nvm.release = wm_put_nvm_80003;
   2500 		break;
   2501 	case WM_T_ICH8:
   2502 	case WM_T_ICH9:
   2503 	case WM_T_ICH10:
   2504 	case WM_T_PCH:
   2505 	case WM_T_PCH2:
   2506 	case WM_T_PCH_LPT:
   2507 		sc->nvm.read = wm_nvm_read_ich8;
   2508 		/* FLASH */
   2509 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2510 		sc->sc_nvm_wordsize = 2048;
   2511 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2512 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2513 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2514 			aprint_error_dev(sc->sc_dev,
   2515 			    "can't map FLASH registers\n");
   2516 			goto out;
   2517 		}
   2518 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2519 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2520 		    ICH_FLASH_SECTOR_SIZE;
   2521 		sc->sc_ich8_flash_bank_size =
   2522 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2523 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2524 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2525 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
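         		/*
         		 * To unpack the arithmetic above: GFPREG gives the first
         		 * and last sector numbers of the NVM portion of the
         		 * flash, so (limit + 1 - base) sectors cover both flash
         		 * banks; the byte count is then divided by 2 (two banks)
         		 * and by sizeof(uint16_t) (bytes -> 16-bit NVM words) to
         		 * get the per-bank size in words.
         		 */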
   2526 		sc->sc_flashreg_offset = 0;
   2527 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2528 		sc->phy.release = wm_put_swflag_ich8lan;
   2529 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2530 		sc->nvm.release = wm_put_nvm_ich8lan;
   2531 		break;
   2532 	case WM_T_PCH_SPT:
   2533 	case WM_T_PCH_CNP:
   2534 		sc->nvm.read = wm_nvm_read_spt;
   2535 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2536 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2537 		sc->sc_flasht = sc->sc_st;
   2538 		sc->sc_flashh = sc->sc_sh;
   2539 		sc->sc_ich8_flash_base = 0;
   2540 		sc->sc_nvm_wordsize =
   2541 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2542 		    * NVM_SIZE_MULTIPLIER;
    2543 		/* It is the size in bytes; we want words */
   2544 		sc->sc_nvm_wordsize /= 2;
   2545 		/* Assume 2 banks */
   2546 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
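         		/*
         		 * For example, a STRAP size field of 7 gives
         		 * (7 + 1) * NVM_SIZE_MULTIPLIER bytes, i.e. 16K 16-bit
         		 * words assuming NVM_SIZE_MULTIPLIER is 4KB, with each
         		 * of the two assumed banks holding half of that.
         		 */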
   2547 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2548 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2549 		sc->phy.release = wm_put_swflag_ich8lan;
   2550 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2551 		sc->nvm.release = wm_put_nvm_ich8lan;
   2552 		break;
   2553 	case WM_T_I210:
   2554 	case WM_T_I211:
    2555 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2556 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2557 		if (wm_nvm_flash_presence_i210(sc)) {
   2558 			sc->nvm.read = wm_nvm_read_eerd;
   2559 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2560 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2561 			wm_nvm_set_addrbits_size_eecd(sc);
   2562 		} else {
   2563 			sc->nvm.read = wm_nvm_read_invm;
   2564 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2565 			sc->sc_nvm_wordsize = INVM_SIZE;
   2566 		}
   2567 		sc->phy.acquire = wm_get_phy_82575;
   2568 		sc->phy.release = wm_put_phy_82575;
   2569 		sc->nvm.acquire = wm_get_nvm_80003;
   2570 		sc->nvm.release = wm_put_nvm_80003;
   2571 		break;
   2572 	default:
   2573 		break;
   2574 	}
   2575 
   2576 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2577 	switch (sc->sc_type) {
   2578 	case WM_T_82571:
   2579 	case WM_T_82572:
   2580 		reg = CSR_READ(sc, WMREG_SWSM2);
   2581 		if ((reg & SWSM2_LOCK) == 0) {
   2582 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2583 			force_clear_smbi = true;
   2584 		} else
   2585 			force_clear_smbi = false;
   2586 		break;
   2587 	case WM_T_82573:
   2588 	case WM_T_82574:
   2589 	case WM_T_82583:
   2590 		force_clear_smbi = true;
   2591 		break;
   2592 	default:
   2593 		force_clear_smbi = false;
   2594 		break;
   2595 	}
   2596 	if (force_clear_smbi) {
   2597 		reg = CSR_READ(sc, WMREG_SWSM);
   2598 		if ((reg & SWSM_SMBI) != 0)
   2599 			aprint_error_dev(sc->sc_dev,
   2600 			    "Please update the Bootagent\n");
   2601 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2602 	}
   2603 
   2604 	/*
    2605 	 * Defer printing the EEPROM type until after verifying the checksum.
   2606 	 * This allows the EEPROM type to be printed correctly in the case
   2607 	 * that no EEPROM is attached.
   2608 	 */
   2609 	/*
   2610 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2611 	 * this for later, so we can fail future reads from the EEPROM.
   2612 	 */
   2613 	if (wm_nvm_validate_checksum(sc)) {
   2614 		/*
    2615 		 * Read a second time, because some PCI-e parts fail the
    2616 		 * first check due to the link being in a sleep state.
   2617 		 */
   2618 		if (wm_nvm_validate_checksum(sc))
   2619 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2620 	}
   2621 
   2622 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2623 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2624 	else {
   2625 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2626 		    sc->sc_nvm_wordsize);
   2627 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2628 			aprint_verbose("iNVM");
   2629 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2630 			aprint_verbose("FLASH(HW)");
   2631 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2632 			aprint_verbose("FLASH");
   2633 		else {
   2634 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2635 				eetype = "SPI";
   2636 			else
   2637 				eetype = "MicroWire";
   2638 			aprint_verbose("(%d address bits) %s EEPROM",
   2639 			    sc->sc_nvm_addrbits, eetype);
   2640 		}
   2641 	}
   2642 	wm_nvm_version(sc);
   2643 	aprint_verbose("\n");
   2644 
   2645 	/*
    2646 	 * XXX The first call to wm_gmii_setup_phytype(). The result might be
   2647 	 * incorrect.
   2648 	 */
   2649 	wm_gmii_setup_phytype(sc, 0, 0);
   2650 
   2651 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2652 	switch (sc->sc_type) {
   2653 	case WM_T_ICH8:
   2654 	case WM_T_ICH9:
   2655 	case WM_T_ICH10:
   2656 	case WM_T_PCH:
   2657 	case WM_T_PCH2:
   2658 	case WM_T_PCH_LPT:
   2659 	case WM_T_PCH_SPT:
   2660 	case WM_T_PCH_CNP:
   2661 		apme_mask = WUC_APME;
   2662 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2663 		if ((eeprom_data & apme_mask) != 0)
   2664 			sc->sc_flags |= WM_F_WOL;
   2665 		break;
   2666 	default:
   2667 		break;
   2668 	}
   2669 
   2670 	/* Reset the chip to a known state. */
   2671 	wm_reset(sc);
   2672 
   2673 	/*
   2674 	 * Check for I21[01] PLL workaround.
   2675 	 *
   2676 	 * Three cases:
   2677 	 * a) Chip is I211.
   2678 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2679 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2680 	 */
   2681 	if (sc->sc_type == WM_T_I211)
   2682 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2683 	if (sc->sc_type == WM_T_I210) {
   2684 		if (!wm_nvm_flash_presence_i210(sc))
   2685 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2686 		else if ((sc->sc_nvm_ver_major < 3)
   2687 		    || ((sc->sc_nvm_ver_major == 3)
   2688 			&& (sc->sc_nvm_ver_minor < 25))) {
   2689 			aprint_verbose_dev(sc->sc_dev,
   2690 			    "ROM image version %d.%d is older than 3.25\n",
   2691 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2692 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2693 		}
   2694 	}
   2695 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2696 		wm_pll_workaround_i210(sc);
   2697 
   2698 	wm_get_wakeup(sc);
   2699 
   2700 	/* Non-AMT based hardware can now take control from firmware */
   2701 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2702 		wm_get_hw_control(sc);
   2703 
	/*
	 * Read the Ethernet address from the EEPROM unless it is found
	 * in the device properties first.
	 */
   2708 	ea = prop_dictionary_get(dict, "mac-address");
   2709 	if (ea != NULL) {
   2710 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2711 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2712 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2713 	} else {
   2714 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2715 			aprint_error_dev(sc->sc_dev,
   2716 			    "unable to read Ethernet address\n");
   2717 			goto out;
   2718 		}
   2719 	}
   2720 
   2721 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2722 	    ether_sprintf(enaddr));
   2723 
   2724 	/*
   2725 	 * Read the config info from the EEPROM, and set up various
   2726 	 * bits in the control registers based on their contents.
   2727 	 */
   2728 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2729 	if (pn != NULL) {
   2730 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2731 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2732 	} else {
   2733 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2734 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2735 			goto out;
   2736 		}
   2737 	}
   2738 
   2739 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2740 	if (pn != NULL) {
   2741 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2742 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2743 	} else {
   2744 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2745 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2746 			goto out;
   2747 		}
   2748 	}
   2749 
   2750 	/* check for WM_F_WOL */
   2751 	switch (sc->sc_type) {
   2752 	case WM_T_82542_2_0:
   2753 	case WM_T_82542_2_1:
   2754 	case WM_T_82543:
   2755 		/* dummy? */
   2756 		eeprom_data = 0;
   2757 		apme_mask = NVM_CFG3_APME;
   2758 		break;
   2759 	case WM_T_82544:
   2760 		apme_mask = NVM_CFG2_82544_APM_EN;
   2761 		eeprom_data = cfg2;
   2762 		break;
   2763 	case WM_T_82546:
   2764 	case WM_T_82546_3:
   2765 	case WM_T_82571:
   2766 	case WM_T_82572:
   2767 	case WM_T_82573:
   2768 	case WM_T_82574:
   2769 	case WM_T_82583:
   2770 	case WM_T_80003:
   2771 	case WM_T_82575:
   2772 	case WM_T_82576:
   2773 		apme_mask = NVM_CFG3_APME;
   2774 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2775 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2776 		break;
   2777 	case WM_T_82580:
   2778 	case WM_T_I350:
   2779 	case WM_T_I354:
   2780 	case WM_T_I210:
   2781 	case WM_T_I211:
   2782 		apme_mask = NVM_CFG3_APME;
   2783 		wm_nvm_read(sc,
   2784 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2785 		    1, &eeprom_data);
   2786 		break;
   2787 	case WM_T_ICH8:
   2788 	case WM_T_ICH9:
   2789 	case WM_T_ICH10:
   2790 	case WM_T_PCH:
   2791 	case WM_T_PCH2:
   2792 	case WM_T_PCH_LPT:
   2793 	case WM_T_PCH_SPT:
   2794 	case WM_T_PCH_CNP:
		/* Already checked before wm_reset() */
   2796 		apme_mask = eeprom_data = 0;
   2797 		break;
   2798 	default: /* XXX 82540 */
   2799 		apme_mask = NVM_CFG3_APME;
   2800 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2801 		break;
   2802 	}
   2803 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2804 	if ((eeprom_data & apme_mask) != 0)
   2805 		sc->sc_flags |= WM_F_WOL;
   2806 
	/*
	 * We have the EEPROM settings; now apply the special cases
	 * where the EEPROM may be wrong or the board won't support
	 * wake-on-LAN on a particular port.
	 */
   2812 	switch (sc->sc_pcidevid) {
   2813 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2814 		sc->sc_flags &= ~WM_F_WOL;
   2815 		break;
   2816 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2817 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
		/* Wake events are only supported on port A of dual-fiber
		 * adapters, regardless of the EEPROM setting. */
   2820 		if (sc->sc_funcid == 1)
   2821 			sc->sc_flags &= ~WM_F_WOL;
   2822 		break;
   2823 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2824 		/* If quad port adapter, disable WoL on all but port A */
   2825 		if (sc->sc_funcid != 0)
   2826 			sc->sc_flags &= ~WM_F_WOL;
   2827 		break;
   2828 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
		/* Wake events are only supported on port A of dual-fiber
		 * adapters, regardless of the EEPROM setting. */
   2831 		if (sc->sc_funcid == 1)
   2832 			sc->sc_flags &= ~WM_F_WOL;
   2833 		break;
   2834 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2835 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2836 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2837 		/* If quad port adapter, disable WoL on all but port A */
   2838 		if (sc->sc_funcid != 0)
   2839 			sc->sc_flags &= ~WM_F_WOL;
   2840 		break;
   2841 	}
   2842 
   2843 	if (sc->sc_type >= WM_T_82575) {
   2844 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2845 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2846 			    nvmword);
   2847 			if ((sc->sc_type == WM_T_82575) ||
   2848 			    (sc->sc_type == WM_T_82576)) {
   2849 				/* Check NVM for autonegotiation */
   2850 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2851 				    != 0)
   2852 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2853 			}
   2854 			if ((sc->sc_type == WM_T_82575) ||
   2855 			    (sc->sc_type == WM_T_I350)) {
   2856 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2857 					sc->sc_flags |= WM_F_MAS;
   2858 			}
   2859 		}
   2860 	}
   2861 
	/*
	 * XXX Some multi-port cards need special handling to disable
	 * a particular port.
	 */
   2866 
   2867 	if (sc->sc_type >= WM_T_82544) {
   2868 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2869 		if (pn != NULL) {
   2870 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2871 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2872 		} else {
   2873 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2874 				aprint_error_dev(sc->sc_dev,
   2875 				    "unable to read SWDPIN\n");
   2876 				goto out;
   2877 			}
   2878 		}
   2879 	}
   2880 
   2881 	if (cfg1 & NVM_CFG1_ILOS)
   2882 		sc->sc_ctrl |= CTRL_ILOS;
   2883 
	/*
	 * XXX
	 * This code isn't correct because pins 2 and 3 are located at
	 * different positions on newer chips. Check all the datasheets.
	 *
	 * Until this is resolved, only do this for chips up to the 82580.
	 */
   2891 	if (sc->sc_type <= WM_T_82580) {
   2892 		if (sc->sc_type >= WM_T_82544) {
   2893 			sc->sc_ctrl |=
   2894 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2895 			    CTRL_SWDPIO_SHIFT;
   2896 			sc->sc_ctrl |=
   2897 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2898 			    CTRL_SWDPINS_SHIFT;
   2899 		} else {
   2900 			sc->sc_ctrl |=
   2901 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2902 			    CTRL_SWDPIO_SHIFT;
   2903 		}
   2904 	}
   2905 
   2906 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2907 		wm_nvm_read(sc,
   2908 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2909 		    1, &nvmword);
   2910 		if (nvmword & NVM_CFG3_ILOS)
   2911 			sc->sc_ctrl |= CTRL_ILOS;
   2912 	}
   2913 
   2914 #if 0
   2915 	if (sc->sc_type >= WM_T_82544) {
   2916 		if (cfg1 & NVM_CFG1_IPS0)
   2917 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2918 		if (cfg1 & NVM_CFG1_IPS1)
   2919 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2920 		sc->sc_ctrl_ext |=
   2921 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2922 		    CTRL_EXT_SWDPIO_SHIFT;
   2923 		sc->sc_ctrl_ext |=
   2924 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2925 		    CTRL_EXT_SWDPINS_SHIFT;
   2926 	} else {
   2927 		sc->sc_ctrl_ext |=
   2928 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2929 		    CTRL_EXT_SWDPIO_SHIFT;
   2930 	}
   2931 #endif
   2932 
   2933 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2934 #if 0
   2935 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2936 #endif
   2937 
   2938 	if (sc->sc_type == WM_T_PCH) {
   2939 		uint16_t val;
   2940 
   2941 		/* Save the NVM K1 bit setting */
   2942 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2943 
   2944 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2945 			sc->sc_nvm_k1_enabled = 1;
   2946 		else
   2947 			sc->sc_nvm_k1_enabled = 0;
   2948 	}
   2949 
	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2951 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2952 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2953 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2954 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2955 	    || sc->sc_type == WM_T_82573
   2956 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2957 		/* Copper only */
	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2962 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2963 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2964 		switch (link_mode) {
   2965 		case CTRL_EXT_LINK_MODE_1000KX:
   2966 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2967 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2968 			break;
   2969 		case CTRL_EXT_LINK_MODE_SGMII:
   2970 			if (wm_sgmii_uses_mdio(sc)) {
   2971 				aprint_normal_dev(sc->sc_dev,
   2972 				    "SGMII(MDIO)\n");
   2973 				sc->sc_flags |= WM_F_SGMII;
   2974 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2975 				break;
   2976 			}
   2977 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2978 			/*FALLTHROUGH*/
   2979 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2980 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2981 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2982 				if (link_mode
   2983 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2984 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2985 					sc->sc_flags |= WM_F_SGMII;
   2986 					aprint_verbose_dev(sc->sc_dev,
   2987 					    "SGMII\n");
   2988 				} else {
   2989 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2990 					aprint_verbose_dev(sc->sc_dev,
   2991 					    "SERDES\n");
   2992 				}
   2993 				break;
   2994 			}
   2995 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2996 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2997 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2998 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2999 				sc->sc_flags |= WM_F_SGMII;
   3000 			}
   3001 			/* Do not change link mode for 100BaseFX */
   3002 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   3003 				break;
   3004 
   3005 			/* Change current link mode setting */
   3006 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   3007 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3008 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   3009 			else
   3010 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   3011 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3012 			break;
   3013 		case CTRL_EXT_LINK_MODE_GMII:
   3014 		default:
   3015 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   3016 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3017 			break;
   3018 		}
   3019 
		reg &= ~CTRL_EXT_I2C_ENA;
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
   3025 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3026 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3027 			if (!wm_sgmii_uses_mdio(sc))
   3028 				wm_gmii_setup_phytype(sc, 0, 0);
   3029 			wm_reset_mdicnfg_82580(sc);
   3030 		}
   3031 	} else if (sc->sc_type < WM_T_82543 ||
   3032 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3033 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3034 			aprint_error_dev(sc->sc_dev,
   3035 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3036 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3037 		}
   3038 	} else {
   3039 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3040 			aprint_error_dev(sc->sc_dev,
   3041 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3042 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3043 		}
   3044 	}
   3045 
   3046 	if (sc->sc_type >= WM_T_PCH2)
   3047 		sc->sc_flags |= WM_F_EEE;
   3048 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3049 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3050 		/* XXX: Need special handling for I354. (not yet) */
   3051 		if (sc->sc_type != WM_T_I354)
   3052 			sc->sc_flags |= WM_F_EEE;
   3053 	}
   3054 
	/*
	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not, so ask for a stripped CRC here and cope
	 * with it in rxeof.
	 */
   3059 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3060 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3061 		sc->sc_flags |= WM_F_CRC_STRIP;
   3062 
   3063 	/* Set device properties (macflags) */
   3064 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3065 
   3066 	if (sc->sc_flags != 0) {
   3067 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3068 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3069 	}
   3070 
   3071 #ifdef WM_MPSAFE
   3072 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3073 #else
   3074 	sc->sc_core_lock = NULL;
   3075 #endif
   3076 
   3077 	/* Initialize the media structures accordingly. */
   3078 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3079 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3080 	else
   3081 		wm_tbi_mediainit(sc); /* All others */
   3082 
   3083 	ifp = &sc->sc_ethercom.ec_if;
   3084 	xname = device_xname(sc->sc_dev);
   3085 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3086 	ifp->if_softc = sc;
   3087 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3088 #ifdef WM_MPSAFE
   3089 	ifp->if_extflags = IFEF_MPSAFE;
   3090 #endif
   3091 	ifp->if_ioctl = wm_ioctl;
   3092 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3093 		ifp->if_start = wm_nq_start;
		/*
		 * When there is only one CPU and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
		 * In this situation, wm_nq_transmit() is disadvantageous
		 * because of wm_select_txqueue() and pcq(9) overhead.
		 */
   3102 		if (wm_is_using_multiqueue(sc))
   3103 			ifp->if_transmit = wm_nq_transmit;
   3104 	} else {
   3105 		ifp->if_start = wm_start;
   3106 		/*
   3107 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3108 		 * described above.
   3109 		 */
   3110 		if (wm_is_using_multiqueue(sc))
   3111 			ifp->if_transmit = wm_transmit;
   3112 	}
	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   3114 	ifp->if_init = wm_init;
   3115 	ifp->if_stop = wm_stop;
   3116 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3117 	IFQ_SET_READY(&ifp->if_snd);
   3118 
   3119 	/* Check for jumbo frame */
   3120 	switch (sc->sc_type) {
   3121 	case WM_T_82573:
   3122 		/* XXX limited to 9234 if ASPM is disabled */
   3123 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3124 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3125 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3126 		break;
   3127 	case WM_T_82571:
   3128 	case WM_T_82572:
   3129 	case WM_T_82574:
   3130 	case WM_T_82583:
   3131 	case WM_T_82575:
   3132 	case WM_T_82576:
   3133 	case WM_T_82580:
   3134 	case WM_T_I350:
   3135 	case WM_T_I354:
   3136 	case WM_T_I210:
   3137 	case WM_T_I211:
   3138 	case WM_T_80003:
   3139 	case WM_T_ICH9:
   3140 	case WM_T_ICH10:
   3141 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3142 	case WM_T_PCH_LPT:
   3143 	case WM_T_PCH_SPT:
   3144 	case WM_T_PCH_CNP:
   3145 		/* XXX limited to 9234 */
   3146 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3147 		break;
   3148 	case WM_T_PCH:
   3149 		/* XXX limited to 4096 */
   3150 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3151 		break;
   3152 	case WM_T_82542_2_0:
   3153 	case WM_T_82542_2_1:
   3154 	case WM_T_ICH8:
   3155 		/* No support for jumbo frame */
   3156 		break;
   3157 	default:
   3158 		/* ETHER_MAX_LEN_JUMBO */
   3159 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3160 		break;
   3161 	}
   3162 
   3163 	/* If we're a i82543 or greater, we can support VLANs. */
   3164 	if (sc->sc_type >= WM_T_82543) {
   3165 		sc->sc_ethercom.ec_capabilities |=
   3166 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3167 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3168 	}
   3169 
   3170 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3171 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3172 
   3173 	/*
   3174 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   3175 	 * on i82543 and later.
   3176 	 */
   3177 	if (sc->sc_type >= WM_T_82543) {
   3178 		ifp->if_capabilities |=
   3179 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3180 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3181 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3182 		    IFCAP_CSUM_TCPv6_Tx |
   3183 		    IFCAP_CSUM_UDPv6_Tx;
   3184 	}
   3185 
   3186 	/*
   3187 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3188 	 *
   3189 	 *	82541GI (8086:1076) ... no
   3190 	 *	82572EI (8086:10b9) ... yes
   3191 	 */
   3192 	if (sc->sc_type >= WM_T_82571) {
   3193 		ifp->if_capabilities |=
   3194 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3195 	}
   3196 
   3197 	/*
   3198 	 * If we're a i82544 or greater (except i82547), we can do
   3199 	 * TCP segmentation offload.
   3200 	 */
   3201 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3202 		ifp->if_capabilities |= IFCAP_TSOv4;
   3203 
   3204 	if (sc->sc_type >= WM_T_82571)
   3205 		ifp->if_capabilities |= IFCAP_TSOv6;
   3206 
   3207 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3208 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3209 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3210 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3211 
   3212 	/* Attach the interface. */
   3213 	if_initialize(ifp);
   3214 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3215 	ether_ifattach(ifp, enaddr);
   3216 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3217 	if_register(ifp);
   3218 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3219 	    RND_FLAG_DEFAULT);
   3220 
   3221 #ifdef WM_EVENT_COUNTERS
   3222 	/* Attach event counters. */
   3223 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3224 	    NULL, xname, "linkintr");
   3225 
   3226 	if (sc->sc_type >= WM_T_82542_2_1) {
   3227 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3228 		    NULL, xname, "tx_xoff");
   3229 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3230 		    NULL, xname, "tx_xon");
   3231 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3232 		    NULL, xname, "rx_xoff");
   3233 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3234 		    NULL, xname, "rx_xon");
   3235 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3236 		    NULL, xname, "rx_macctl");
   3237 	}
   3238 
   3239 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3240 	    NULL, xname, "CRC Error");
   3241 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3242 	    NULL, xname, "Symbol Error");
   3243 
   3244 	if (sc->sc_type >= WM_T_82543) {
   3245 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3246 		    NULL, xname, "Alignment Error");
   3247 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3248 		    NULL, xname, "Receive Error");
   3249 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
   3250 		    NULL, xname, "Carrier Extension Error");
   3251 	}
   3252 
   3253 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3254 	    NULL, xname, "Missed Packets");
   3255 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3256 	    NULL, xname, "Collision");
   3257 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3258 	    NULL, xname, "Sequence Error");
   3259 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3260 	    NULL, xname, "Receive Length Error");
   3261 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3262 	    NULL, xname, "Single Collision");
   3263 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3264 	    NULL, xname, "Excessive Collisions");
   3265 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3266 	    NULL, xname, "Multiple Collision");
   3267 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3268 	    NULL, xname, "Late Collisions");
   3269 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3270 	    NULL, xname, "Defer");
   3271 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3272 	    NULL, xname, "Good Packets Rx");
   3273 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3274 	    NULL, xname, "Broadcast Packets Rx");
   3275 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3276 	    NULL, xname, "Multicast Packets Rx");
   3277 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3278 	    NULL, xname, "Good Packets Tx");
   3279 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3280 	    NULL, xname, "Good Octets Rx");
   3281 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3282 	    NULL, xname, "Good Octets Tx");
   3283 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3284 	    NULL, xname, "Rx No Buffers");
   3285 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3286 	    NULL, xname, "Rx Undersize");
   3287 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3288 	    NULL, xname, "Rx Fragment");
   3289 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3290 	    NULL, xname, "Rx Oversize");
   3291 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3292 	    NULL, xname, "Rx Jabber");
   3293 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3294 	    NULL, xname, "Total Octets Rx");
   3295 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3296 	    NULL, xname, "Total Octets Tx");
   3297 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3298 	    NULL, xname, "Total Packets Rx");
   3299 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3300 	    NULL, xname, "Total Packets Tx");
   3301 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3302 	    NULL, xname, "Multicast Packets Tx");
   3303 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3304 	    NULL, xname, "Broadcast Packets Tx Count");
   3305 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3306 	    NULL, xname, "Packets Rx (64 bytes)");
   3307 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3308 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3309 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3310 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3311 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
   3312 	    NULL, xname, "Packets Rx (255-511 bytes)");
   3313 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3314 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3315 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3316 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3317 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3318 	    NULL, xname, "Packets Tx (64 bytes)");
   3319 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3320 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3321 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3322 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3323 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3324 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3325 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3326 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3327 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
   3328 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
   3329 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3330 	    NULL, xname, "Interrupt Assertion");
   3331 	evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3332 	    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3333 	evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3334 	    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3335 	evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3336 	    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3337 	evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3338 	    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3339 	evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3340 	    NULL, xname, "Intr. Cause Tx Queue Empty");
   3341 	evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3342 	    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3343 	evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
   3344 	    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3345 	evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3346 	    NULL, xname, "Interrupt Cause Receiver Overrun");
   3347 	if (sc->sc_type >= WM_T_82543) {
   3348 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3349 		    NULL, xname, "Tx with No CRS");
   3350 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3351 		    NULL, xname, "TCP Segmentation Context Tx");
   3352 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
   3353 		    NULL, xname, "TCP Segmentation Context Tx Fail");
   3354 	}
   3355 	if (sc->sc_type >= WM_T_82540) {
   3356 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3357 		    NULL, xname, "Management Packets RX");
   3358 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3359 		    NULL, xname, "Management Packets Dropped");
   3360 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3361 		    NULL, xname, "Management Packets TX");
   3362 	}
   3363 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3364 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3365 		    NULL, xname, "BMC2OS Packets received by host");
   3366 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3367 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3368 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3369 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3370 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3371 		    NULL, xname, "OS2BMC Packets received by BMC");
   3372 	}
   3373 #endif /* WM_EVENT_COUNTERS */
   3374 
   3375 	sc->sc_txrx_use_workqueue = false;
   3376 
   3377 	if (wm_phy_need_linkdown_discard(sc)) {
   3378 		DPRINTF(sc, WM_DEBUG_LINK,
   3379 		    ("%s: %s: Set linkdown discard flag\n",
   3380 			device_xname(sc->sc_dev), __func__));
   3381 		wm_set_linkdown_discard(sc);
   3382 	}
   3383 
   3384 	wm_init_sysctls(sc);
   3385 
   3386 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3387 		pmf_class_network_register(self, ifp);
   3388 	else
   3389 		aprint_error_dev(self, "couldn't establish power handler\n");
   3390 
   3391 	sc->sc_flags |= WM_F_ATTACHED;
   3392 out:
   3393 	return;
   3394 }
   3395 
   3396 /* The detach function (ca_detach) */
   3397 static int
   3398 wm_detach(device_t self, int flags __unused)
   3399 {
   3400 	struct wm_softc *sc = device_private(self);
   3401 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3402 	int i;
   3403 
   3404 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3405 		return 0;
   3406 
   3407 	/* Stop the interface. Callouts are stopped in it. */
   3408 	IFNET_LOCK(ifp);
   3409 	sc->sc_dying = true;
   3410 	wm_stop(ifp, 1);
   3411 	IFNET_UNLOCK(ifp);
   3412 
   3413 	pmf_device_deregister(self);
   3414 
   3415 	sysctl_teardown(&sc->sc_sysctllog);
   3416 
   3417 #ifdef WM_EVENT_COUNTERS
   3418 	evcnt_detach(&sc->sc_ev_linkintr);
   3419 
   3420 	if (sc->sc_type >= WM_T_82542_2_1) {
   3421 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3422 		evcnt_detach(&sc->sc_ev_tx_xon);
   3423 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3424 		evcnt_detach(&sc->sc_ev_rx_xon);
   3425 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3426 	}
   3427 
   3428 	evcnt_detach(&sc->sc_ev_crcerrs);
   3429 	evcnt_detach(&sc->sc_ev_symerrc);
   3430 
   3431 	if (sc->sc_type >= WM_T_82543) {
   3432 		evcnt_detach(&sc->sc_ev_algnerrc);
   3433 		evcnt_detach(&sc->sc_ev_rxerrc);
   3434 		evcnt_detach(&sc->sc_ev_cexterr);
   3435 	}
   3436 	evcnt_detach(&sc->sc_ev_mpc);
   3437 	evcnt_detach(&sc->sc_ev_colc);
   3438 	evcnt_detach(&sc->sc_ev_sec);
   3439 	evcnt_detach(&sc->sc_ev_rlec);
   3440 	evcnt_detach(&sc->sc_ev_scc);
   3441 	evcnt_detach(&sc->sc_ev_ecol);
   3442 	evcnt_detach(&sc->sc_ev_mcc);
   3443 	evcnt_detach(&sc->sc_ev_latecol);
   3444 	evcnt_detach(&sc->sc_ev_dc);
   3445 	evcnt_detach(&sc->sc_ev_gprc);
   3446 	evcnt_detach(&sc->sc_ev_bprc);
   3447 	evcnt_detach(&sc->sc_ev_mprc);
   3448 	evcnt_detach(&sc->sc_ev_gptc);
   3449 	evcnt_detach(&sc->sc_ev_gorc);
   3450 	evcnt_detach(&sc->sc_ev_gotc);
   3451 	evcnt_detach(&sc->sc_ev_rnbc);
   3452 	evcnt_detach(&sc->sc_ev_ruc);
   3453 	evcnt_detach(&sc->sc_ev_rfc);
   3454 	evcnt_detach(&sc->sc_ev_roc);
   3455 	evcnt_detach(&sc->sc_ev_rjc);
   3456 	evcnt_detach(&sc->sc_ev_tor);
   3457 	evcnt_detach(&sc->sc_ev_tot);
   3458 	evcnt_detach(&sc->sc_ev_tpr);
   3459 	evcnt_detach(&sc->sc_ev_tpt);
   3460 	evcnt_detach(&sc->sc_ev_mptc);
   3461 	evcnt_detach(&sc->sc_ev_bptc);
   3462 	evcnt_detach(&sc->sc_ev_prc64);
   3463 	evcnt_detach(&sc->sc_ev_prc127);
   3464 	evcnt_detach(&sc->sc_ev_prc255);
   3465 	evcnt_detach(&sc->sc_ev_prc511);
   3466 	evcnt_detach(&sc->sc_ev_prc1023);
   3467 	evcnt_detach(&sc->sc_ev_prc1522);
   3468 	evcnt_detach(&sc->sc_ev_ptc64);
   3469 	evcnt_detach(&sc->sc_ev_ptc127);
   3470 	evcnt_detach(&sc->sc_ev_ptc255);
   3471 	evcnt_detach(&sc->sc_ev_ptc511);
   3472 	evcnt_detach(&sc->sc_ev_ptc1023);
   3473 	evcnt_detach(&sc->sc_ev_ptc1522);
   3474 	evcnt_detach(&sc->sc_ev_iac);
   3475 	evcnt_detach(&sc->sc_ev_icrxptc);
   3476 	evcnt_detach(&sc->sc_ev_icrxatc);
   3477 	evcnt_detach(&sc->sc_ev_ictxptc);
   3478 	evcnt_detach(&sc->sc_ev_ictxact);
   3479 	evcnt_detach(&sc->sc_ev_ictxqec);
   3480 	evcnt_detach(&sc->sc_ev_ictxqmtc);
   3481 	evcnt_detach(&sc->sc_ev_icrxdmtc);
   3482 	evcnt_detach(&sc->sc_ev_icrxoc);
   3483 	if (sc->sc_type >= WM_T_82543) {
   3484 		evcnt_detach(&sc->sc_ev_tncrs);
   3485 		evcnt_detach(&sc->sc_ev_tsctc);
   3486 		evcnt_detach(&sc->sc_ev_tsctfc);
   3487 	}
   3488 	if (sc->sc_type >= WM_T_82540) {
   3489 		evcnt_detach(&sc->sc_ev_mgtprc);
   3490 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3491 		evcnt_detach(&sc->sc_ev_mgtptc);
   3492 	}
   3493 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3494 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3495 		evcnt_detach(&sc->sc_ev_o2bspc);
   3496 		evcnt_detach(&sc->sc_ev_b2ospc);
   3497 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3498 	}
   3499 #endif /* WM_EVENT_COUNTERS */
   3500 
   3501 	rnd_detach_source(&sc->rnd_source);
   3502 
   3503 	/* Tell the firmware about the release */
   3504 	WM_CORE_LOCK(sc);
   3505 	wm_release_manageability(sc);
   3506 	wm_release_hw_control(sc);
   3507 	wm_enable_wakeup(sc);
   3508 	WM_CORE_UNLOCK(sc);
   3509 
   3510 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3511 
   3512 	ether_ifdetach(ifp);
   3513 	if_detach(ifp);
   3514 	if_percpuq_destroy(sc->sc_ipq);
   3515 
   3516 	/* Delete all remaining media. */
   3517 	ifmedia_fini(&sc->sc_mii.mii_media);
   3518 
   3519 	/* Unload RX dmamaps and free mbufs */
   3520 	for (i = 0; i < sc->sc_nqueues; i++) {
   3521 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3522 		mutex_enter(rxq->rxq_lock);
   3523 		wm_rxdrain(rxq);
   3524 		mutex_exit(rxq->rxq_lock);
   3525 	}
   3526 	/* Must unlock here */
   3527 
   3528 	/* Disestablish the interrupt handler */
   3529 	for (i = 0; i < sc->sc_nintrs; i++) {
   3530 		if (sc->sc_ihs[i] != NULL) {
   3531 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3532 			sc->sc_ihs[i] = NULL;
   3533 		}
   3534 	}
   3535 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3536 
   3537 	/* wm_stop() ensured that the workqueues are stopped. */
   3538 	workqueue_destroy(sc->sc_queue_wq);
   3539 	workqueue_destroy(sc->sc_reset_wq);
   3540 
   3541 	for (i = 0; i < sc->sc_nqueues; i++)
   3542 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3543 
   3544 	wm_free_txrx_queues(sc);
   3545 
   3546 	/* Unmap the registers */
   3547 	if (sc->sc_ss) {
   3548 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3549 		sc->sc_ss = 0;
   3550 	}
   3551 	if (sc->sc_ios) {
   3552 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3553 		sc->sc_ios = 0;
   3554 	}
   3555 	if (sc->sc_flashs) {
   3556 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3557 		sc->sc_flashs = 0;
   3558 	}
   3559 
   3560 	if (sc->sc_core_lock)
   3561 		mutex_obj_free(sc->sc_core_lock);
   3562 	if (sc->sc_ich_phymtx)
   3563 		mutex_obj_free(sc->sc_ich_phymtx);
   3564 	if (sc->sc_ich_nvmmtx)
   3565 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3566 
   3567 	return 0;
   3568 }
   3569 
   3570 static bool
   3571 wm_suspend(device_t self, const pmf_qual_t *qual)
   3572 {
   3573 	struct wm_softc *sc = device_private(self);
   3574 
   3575 	wm_release_manageability(sc);
   3576 	wm_release_hw_control(sc);
   3577 	wm_enable_wakeup(sc);
   3578 
   3579 	return true;
   3580 }
   3581 
   3582 static bool
   3583 wm_resume(device_t self, const pmf_qual_t *qual)
   3584 {
   3585 	struct wm_softc *sc = device_private(self);
   3586 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3587 	pcireg_t reg;
   3588 	char buf[256];
   3589 
   3590 	reg = CSR_READ(sc, WMREG_WUS);
   3591 	if (reg != 0) {
   3592 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3593 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3594 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3595 	}
   3596 
   3597 	if (sc->sc_type >= WM_T_PCH2)
   3598 		wm_resume_workarounds_pchlan(sc);
   3599 	IFNET_LOCK(ifp);
   3600 	if ((ifp->if_flags & IFF_UP) == 0) {
   3601 		/* >= PCH_SPT hardware workaround before reset. */
   3602 		if (sc->sc_type >= WM_T_PCH_SPT)
   3603 			wm_flush_desc_rings(sc);
   3604 
   3605 		wm_reset(sc);
   3606 		/* Non-AMT based hardware can now take control from firmware */
   3607 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3608 			wm_get_hw_control(sc);
   3609 		wm_init_manageability(sc);
   3610 	} else {
		/*
		 * We called pmf_class_network_register(), so if_init() is
		 * called automatically when the interface is IFF_UP.
		 * wm_reset(), wm_get_hw_control() and
		 * wm_init_manageability() are then called via wm_init().
		 */
   3617 	}
   3618 	IFNET_UNLOCK(ifp);
   3619 
   3620 	return true;
   3621 }
   3622 
   3623 /*
   3624  * wm_watchdog:
   3625  *
   3626  *	Watchdog checker.
   3627  */
   3628 static bool
   3629 wm_watchdog(struct ifnet *ifp)
   3630 {
   3631 	int qid;
   3632 	struct wm_softc *sc = ifp->if_softc;
	/* Bitmask of hung queues; wm(4)'s max queue count is the 82576's 16. */
	uint16_t hang_queue = 0;
   3634 
   3635 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3636 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3637 
   3638 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3639 	}
   3640 
   3641 #ifdef WM_DEBUG
   3642 	if (sc->sc_trigger_reset) {
		/* Debug operation; no need for atomicity or reliability. */
   3644 		sc->sc_trigger_reset = 0;
   3645 		hang_queue++;
   3646 	}
   3647 #endif
   3648 
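	/* No queue is hung; tell the caller (wm_tick) to reschedule itself. */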
   3649 	if (hang_queue == 0)
   3650 		return true;
   3651 
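	/*
	 * Atomically claim sc_reset_pending so that only one reset work is
	 * enqueued at a time; it is cleared in wm_handle_reset_work().
	 */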
   3652 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3653 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3654 
   3655 	return false;
   3656 }
   3657 
   3658 /*
   3659  * Perform an interface watchdog reset.
   3660  */
   3661 static void
   3662 wm_handle_reset_work(struct work *work, void *arg)
   3663 {
   3664 	struct wm_softc * const sc = arg;
   3665 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3666 
   3667 	/* Don't want ioctl operations to happen */
   3668 	IFNET_LOCK(ifp);
   3669 
	/* Reset the interface. */
   3671 	wm_init(ifp);
   3672 
   3673 	IFNET_UNLOCK(ifp);
   3674 
	/*
	 * Some upper-layer processing still calls ifp->if_start()
	 * directly, e.g. ALTQ or a single-CPU system.
	 */
   3679 	/* Try to get more packets going. */
   3680 	ifp->if_start(ifp);
   3681 
   3682 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3683 }
   3684 
   3685 
   3686 static void
   3687 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3688 {
   3689 
   3690 	mutex_enter(txq->txq_lock);
   3691 	if (txq->txq_sending &&
   3692 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3693 		wm_watchdog_txq_locked(ifp, txq, hang);
   3694 
   3695 	mutex_exit(txq->txq_lock);
   3696 }
   3697 
   3698 static void
   3699 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3700     uint16_t *hang)
   3701 {
   3702 	struct wm_softc *sc = ifp->if_softc;
   3703 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3704 
   3705 	KASSERT(mutex_owned(txq->txq_lock));
   3706 
   3707 	/*
   3708 	 * Since we're using delayed interrupts, sweep up
   3709 	 * before we report an error.
   3710 	 */
   3711 	wm_txeof(txq, UINT_MAX);
   3712 
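	/*
	 * If the queue is still marked as sending after the sweep, record
	 * it in the caller's bitmask of hung queues.
	 */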
   3713 	if (txq->txq_sending)
   3714 		*hang |= __BIT(wmq->wmq_id);
   3715 
   3716 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3717 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3718 		    device_xname(sc->sc_dev));
   3719 	} else {
   3720 #ifdef WM_DEBUG
   3721 		int i, j;
   3722 		struct wm_txsoft *txs;
   3723 #endif
   3724 		log(LOG_ERR,
   3725 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3726 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3727 		    txq->txq_next);
   3728 		if_statinc(ifp, if_oerrors);
   3729 #ifdef WM_DEBUG
   3730 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3731 		    i = WM_NEXTTXS(txq, i)) {
   3732 			txs = &txq->txq_soft[i];
   3733 			printf("txs %d tx %d -> %d\n",
   3734 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3735 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3736 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3737 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3738 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3739 					printf("\t %#08x%08x\n",
   3740 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3741 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3742 				} else {
   3743 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3744 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3745 					    txq->txq_descs[j].wtx_addr.wa_low);
   3746 					printf("\t %#04x%02x%02x%08x\n",
   3747 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3748 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3749 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3750 					    txq->txq_descs[j].wtx_cmdlen);
   3751 				}
   3752 				if (j == txs->txs_lastdesc)
   3753 					break;
   3754 			}
   3755 		}
   3756 #endif
   3757 	}
   3758 }
   3759 
   3760 /*
   3761  * wm_tick:
   3762  *
   3763  *	One second timer, used to check link status, sweep up
   3764  *	completed transmit jobs, etc.
   3765  */
   3766 static void
   3767 wm_tick(void *arg)
   3768 {
   3769 	struct wm_softc *sc = arg;
   3770 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
	    cexterr;
   3773 #ifndef WM_MPSAFE
   3774 	int s = splnet();
   3775 #endif
   3776 
   3777 	WM_CORE_LOCK(sc);
   3778 
   3779 	if (sc->sc_core_stopping) {
   3780 		WM_CORE_UNLOCK(sc);
   3781 #ifndef WM_MPSAFE
   3782 		splx(s);
   3783 #endif
   3784 		return;
   3785 	}
   3786 
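	/*
	 * Note: these hardware statistics registers are clear-on-read, so
	 * each CSR_READ below returns the delta since the previous tick.
	 */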
   3787 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3788 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3789 	mpc = CSR_READ(sc, WMREG_MPC);
   3790 	colc = CSR_READ(sc, WMREG_COLC);
   3791 	sec = CSR_READ(sc, WMREG_SEC);
   3792 	rlec = CSR_READ(sc, WMREG_RLEC);
   3793 
   3794 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3795 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3796 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3797 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3798 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3799 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3800 
   3801 	if (sc->sc_type >= WM_T_82542_2_1) {
   3802 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3803 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3804 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3805 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3806 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3807 	}
   3808 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3809 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3810 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3811 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3812 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3813 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3814 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3815 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3816 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3817 
	/* 64-bit octet counters: the high register holds bits 63:32. */
	WM_EVCNT_ADD(&sc->sc_ev_gorc, CSR_READ(sc, WMREG_GORCL)
	    + ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
	WM_EVCNT_ADD(&sc->sc_ev_gotc, CSR_READ(sc, WMREG_GOTCL)
	    + ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   3822 
   3823 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3824 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3825 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3826 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3827 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3828 
	WM_EVCNT_ADD(&sc->sc_ev_tor, CSR_READ(sc, WMREG_TORL)
	    + ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
	WM_EVCNT_ADD(&sc->sc_ev_tot, CSR_READ(sc, WMREG_TOTL)
	    + ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   3833 
   3834 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3835 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3836 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   3837 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   3838 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3839 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3840 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3841 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3842 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3843 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3844 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   3845 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   3846 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   3847 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   3848 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   3849 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   3850 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   3851 	WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   3852 	WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   3853 	WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   3854 	WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   3855 	WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   3856 	WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
   3857 	WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
   3858 	WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   3859 
   3860 	if (sc->sc_type >= WM_T_82543) {
   3861 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3862 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3863 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3864 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3865 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3866 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3867 
   3868 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3869 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3870 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
   3871 	} else
   3872 		algnerrc = rxerrc = cexterr = 0;
   3873 
   3874 	if (sc->sc_type >= WM_T_82540) {
   3875 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3876 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3877 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3878 	}
   3879 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
   3880 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
   3881 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
   3882 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
   3883 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
   3884 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
   3885 	}
   3886 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3887 	if_statadd_ref(nsr, if_collisions, colc);
   3888 	if_statadd_ref(nsr, if_ierrors,
   3889 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
	/*
	 * WMREG_RNBC is incremented when there are no available buffers
	 * in host memory. It is not the number of dropped packets, because
	 * the controller can still receive packets in that case as long as
	 * there is space in the PHY's FIFO.
	 *
	 * If you want to track WMREG_RNBC itself, use a dedicated EVCNT
	 * instead of if_iqdrops.
	 */
   3899 	if_statadd_ref(nsr, if_iqdrops, mpc);
   3900 	IF_STAT_PUTREF(ifp);
   3901 
   3902 	if (sc->sc_flags & WM_F_HAS_MII)
   3903 		mii_tick(&sc->sc_mii);
   3904 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3905 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3906 		wm_serdes_tick(sc);
   3907 	else
   3908 		wm_tbi_tick(sc);
   3909 
   3910 	WM_CORE_UNLOCK(sc);
   3911 #ifndef WM_MPSAFE
   3912 	splx(s);
   3913 #endif
   3914 
   3915 	if (wm_watchdog(ifp))
   3916 		callout_schedule(&sc->sc_tick_ch, hz);
   3917 }
   3918 
   3919 static int
   3920 wm_ifflags_cb(struct ethercom *ec)
   3921 {
   3922 	struct ifnet *ifp = &ec->ec_if;
   3923 	struct wm_softc *sc = ifp->if_softc;
   3924 	u_short iffchange;
   3925 	int ecchange;
   3926 	bool needreset = false;
   3927 	int rc = 0;
   3928 
   3929 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3930 		device_xname(sc->sc_dev), __func__));
   3931 
   3932 	KASSERT(IFNET_LOCKED(ifp));
   3933 	WM_CORE_LOCK(sc);
   3934 
	/*
	 * Check for changes to if_flags.
	 * The main purpose is to prevent link-down when opening bpf.
	 */
   3939 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3940 	sc->sc_if_flags = ifp->if_flags;
   3941 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3942 		needreset = true;
   3943 		goto ec;
   3944 	}
   3945 
   3946 	/* iff related updates */
   3947 	if ((iffchange & IFF_PROMISC) != 0)
   3948 		wm_set_filter(sc);
   3949 
   3950 	wm_set_vlan(sc);
   3951 
   3952 ec:
   3953 	/* Check for ec_capenable. */
   3954 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3955 	sc->sc_ec_capenable = ec->ec_capenable;
   3956 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3957 		needreset = true;
   3958 		goto out;
   3959 	}
   3960 
   3961 	/* ec related updates */
   3962 	wm_set_eee(sc);
   3963 
   3964 out:
   3965 	if (needreset)
   3966 		rc = ENETRESET;
   3967 	WM_CORE_UNLOCK(sc);
   3968 
   3969 	return rc;
   3970 }
   3971 
   3972 static bool
   3973 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3974 {
   3975 
   3976 	switch (sc->sc_phytype) {
   3977 	case WMPHY_82577: /* ihphy */
   3978 	case WMPHY_82578: /* atphy */
   3979 	case WMPHY_82579: /* ihphy */
   3980 	case WMPHY_I217: /* ihphy */
   3981 	case WMPHY_82580: /* ihphy */
   3982 	case WMPHY_I350: /* ihphy */
   3983 		return true;
   3984 	default:
   3985 		return false;
   3986 	}
   3987 }
   3988 
   3989 static void
   3990 wm_set_linkdown_discard(struct wm_softc *sc)
   3991 {
   3992 
   3993 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3994 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3995 
   3996 		mutex_enter(txq->txq_lock);
   3997 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3998 		mutex_exit(txq->txq_lock);
   3999 	}
   4000 }
   4001 
   4002 static void
   4003 wm_clear_linkdown_discard(struct wm_softc *sc)
   4004 {
   4005 
   4006 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4007 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4008 
   4009 		mutex_enter(txq->txq_lock);
   4010 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   4011 		mutex_exit(txq->txq_lock);
   4012 	}
   4013 }
   4014 
   4015 /*
   4016  * wm_ioctl:		[ifnet interface function]
   4017  *
   4018  *	Handle control requests from the operator.
   4019  */
   4020 static int
   4021 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4022 {
   4023 	struct wm_softc *sc = ifp->if_softc;
   4024 	struct ifreq *ifr = (struct ifreq *)data;
   4025 	struct ifaddr *ifa = (struct ifaddr *)data;
   4026 	struct sockaddr_dl *sdl;
   4027 	int error;
   4028 
   4029 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4030 		device_xname(sc->sc_dev), __func__));
   4031 
   4032 	switch (cmd) {
   4033 	case SIOCADDMULTI:
   4034 	case SIOCDELMULTI:
   4035 		break;
   4036 	default:
   4037 		KASSERT(IFNET_LOCKED(ifp));
   4038 	}
   4039 
   4040 #ifndef WM_MPSAFE
   4041 	const int s = splnet();
   4042 #endif
   4043 	switch (cmd) {
   4044 	case SIOCSIFMEDIA:
   4045 		WM_CORE_LOCK(sc);
   4046 		/* Flow control requires full-duplex mode. */
   4047 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4048 		    (ifr->ifr_media & IFM_FDX) == 0)
   4049 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4050 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4051 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4052 				/* We can do both TXPAUSE and RXPAUSE. */
   4053 				ifr->ifr_media |=
   4054 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4055 			}
   4056 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4057 		}
   4058 		WM_CORE_UNLOCK(sc);
   4059 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4060 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4061 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4062 				DPRINTF(sc, WM_DEBUG_LINK,
   4063 				    ("%s: %s: Set linkdown discard flag\n",
   4064 					device_xname(sc->sc_dev), __func__));
   4065 				wm_set_linkdown_discard(sc);
   4066 			}
   4067 		}
   4068 		break;
   4069 	case SIOCINITIFADDR:
   4070 		WM_CORE_LOCK(sc);
   4071 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4072 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4073 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4074 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4075 			/* Unicast address is the first multicast entry */
   4076 			wm_set_filter(sc);
   4077 			error = 0;
   4078 			WM_CORE_UNLOCK(sc);
   4079 			break;
   4080 		}
   4081 		WM_CORE_UNLOCK(sc);
   4082 		/*FALLTHROUGH*/
   4083 	default:
   4084 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4085 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4086 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4087 				DPRINTF(sc, WM_DEBUG_LINK,
   4088 				    ("%s: %s: Set linkdown discard flag\n",
   4089 					device_xname(sc->sc_dev), __func__));
   4090 				wm_set_linkdown_discard(sc);
   4091 			}
   4092 		}
   4093 #ifdef WM_MPSAFE
   4094 		const int s = splnet();
   4095 #endif
   4096 		/* It may call wm_start, so unlock here */
   4097 		error = ether_ioctl(ifp, cmd, data);
   4098 #ifdef WM_MPSAFE
   4099 		splx(s);
   4100 #endif
   4101 		if (error != ENETRESET)
   4102 			break;
   4103 
   4104 		error = 0;
   4105 
   4106 		if (cmd == SIOCSIFCAP)
   4107 			error = if_init(ifp);
   4108 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4109 			WM_CORE_LOCK(sc);
   4110 			if (sc->sc_if_flags & IFF_RUNNING) {
				/*
				 * Multicast list has changed; set the
				 * hardware filter accordingly.
				 */
   4115 				wm_set_filter(sc);
   4116 			}
   4117 			WM_CORE_UNLOCK(sc);
   4118 		}
   4119 		break;
   4120 	}
   4121 
   4122 #ifndef WM_MPSAFE
   4123 	splx(s);
   4124 #endif
   4125 	return error;
   4126 }
   4127 
   4128 /* MAC address related */
   4129 
/*
 * Get the offset of the MAC address and return it.
 * If an error occurs, use offset 0.
 */
   4134 static uint16_t
   4135 wm_check_alt_mac_addr(struct wm_softc *sc)
   4136 {
   4137 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4138 	uint16_t offset = NVM_OFF_MACADDR;
   4139 
   4140 	/* Try to read alternative MAC address pointer */
   4141 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4142 		return 0;
   4143 
	/* Check whether the pointer is valid. */
   4145 	if ((offset == 0x0000) || (offset == 0xffff))
   4146 		return 0;
   4147 
   4148 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
	/*
	 * Check whether the alternative MAC address is valid. Some cards
	 * have a non-0xffff pointer but don't actually use an alternative
	 * MAC address.
	 *
	 * A valid unicast address must have the multicast (group) bit clear.
	 */
   4156 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4157 		if (((myea[0] & 0xff) & 0x01) == 0)
   4158 			return offset; /* Found */
   4159 
   4160 	/* Not found */
   4161 	return 0;
   4162 }
   4163 
   4164 static int
   4165 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4166 {
   4167 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4168 	uint16_t offset = NVM_OFF_MACADDR;
   4169 	int do_invert = 0;
   4170 
   4171 	switch (sc->sc_type) {
   4172 	case WM_T_82580:
   4173 	case WM_T_I350:
   4174 	case WM_T_I354:
   4175 		/* EEPROM Top Level Partitioning */
   4176 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4177 		break;
   4178 	case WM_T_82571:
   4179 	case WM_T_82575:
   4180 	case WM_T_82576:
   4181 	case WM_T_80003:
   4182 	case WM_T_I210:
   4183 	case WM_T_I211:
   4184 		offset = wm_check_alt_mac_addr(sc);
   4185 		if (offset == 0)
   4186 			if ((sc->sc_funcid & 0x01) == 1)
   4187 				do_invert = 1;
   4188 		break;
   4189 	default:
   4190 		if ((sc->sc_funcid & 0x01) == 1)
   4191 			do_invert = 1;
   4192 		break;
   4193 	}
   4194 
   4195 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4196 		goto bad;
   4197 
   4198 	enaddr[0] = myea[0] & 0xff;
   4199 	enaddr[1] = myea[0] >> 8;
   4200 	enaddr[2] = myea[1] & 0xff;
   4201 	enaddr[3] = myea[1] >> 8;
   4202 	enaddr[4] = myea[2] & 0xff;
   4203 	enaddr[5] = myea[2] >> 8;
   4204 
   4205 	/*
   4206 	 * Toggle the LSB of the MAC address on the second port
   4207 	 * of some dual port cards.
   4208 	 */
   4209 	if (do_invert != 0)
   4210 		enaddr[5] ^= 1;
   4211 
   4212 	return 0;
   4213 
   4214  bad:
   4215 	return -1;
   4216 }
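
/*
 * Illustrative example (hypothetical address): for a station address
 * 00:1b:21:12:34:56 the NVM words read above are myea[0] = 0x1b00,
 * myea[1] = 0x1221 and myea[2] = 0x5634, i.e. each word holds two
 * octets, low byte first.  With do_invert set, the LSB of the last
 * octet is toggled, so ...:56 becomes ...:57 on the second port.
 */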
   4217 
   4218 /*
   4219  * wm_set_ral:
   4220  *
 *	Set an entry in the receive address list.
   4222  */
   4223 static void
   4224 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4225 {
   4226 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4227 	uint32_t wlock_mac;
   4228 	int rv;
   4229 
   4230 	if (enaddr != NULL) {
   4231 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4232 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4233 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4234 		ral_hi |= RAL_AV;
   4235 	} else {
   4236 		ral_lo = 0;
   4237 		ral_hi = 0;
   4238 	}
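
	/*
	 * Illustrative example (hypothetical address): for enaddr
	 * 00:1b:21:12:34:56 the packing above yields ral_lo = 0x12211b00
	 * and ral_hi = RAL_AV | 0x5634, i.e. the address is stored
	 * little-endian across RAL/RAH with the Address Valid bit set.
	 */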
   4239 
   4240 	switch (sc->sc_type) {
   4241 	case WM_T_82542_2_0:
   4242 	case WM_T_82542_2_1:
   4243 	case WM_T_82543:
   4244 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4245 		CSR_WRITE_FLUSH(sc);
   4246 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4247 		CSR_WRITE_FLUSH(sc);
   4248 		break;
   4249 	case WM_T_PCH2:
   4250 	case WM_T_PCH_LPT:
   4251 	case WM_T_PCH_SPT:
   4252 	case WM_T_PCH_CNP:
   4253 		if (idx == 0) {
   4254 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4255 			CSR_WRITE_FLUSH(sc);
   4256 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4257 			CSR_WRITE_FLUSH(sc);
   4258 			return;
   4259 		}
   4260 		if (sc->sc_type != WM_T_PCH2) {
   4261 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4262 			    FWSM_WLOCK_MAC);
   4263 			addrl = WMREG_SHRAL(idx - 1);
   4264 			addrh = WMREG_SHRAH(idx - 1);
   4265 		} else {
   4266 			wlock_mac = 0;
   4267 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4268 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4269 		}
   4270 
   4271 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4272 			rv = wm_get_swflag_ich8lan(sc);
   4273 			if (rv != 0)
   4274 				return;
   4275 			CSR_WRITE(sc, addrl, ral_lo);
   4276 			CSR_WRITE_FLUSH(sc);
   4277 			CSR_WRITE(sc, addrh, ral_hi);
   4278 			CSR_WRITE_FLUSH(sc);
   4279 			wm_put_swflag_ich8lan(sc);
   4280 		}
   4281 
   4282 		break;
   4283 	default:
   4284 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4285 		CSR_WRITE_FLUSH(sc);
   4286 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4287 		CSR_WRITE_FLUSH(sc);
   4288 		break;
   4289 	}
   4290 }
   4291 
   4292 /*
   4293  * wm_mchash:
   4294  *
   4295  *	Compute the hash of the multicast address for the 4096-bit
   4296  *	multicast filter.
   4297  */
   4298 static uint32_t
   4299 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4300 {
   4301 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4302 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4303 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4304 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4305 	uint32_t hash;
   4306 
   4307 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4308 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4309 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4310 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4311 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4312 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4313 		return (hash & 0x3ff);
   4314 	}
   4315 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4316 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4317 
   4318 	return (hash & 0xfff);
   4319 }
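
/*
 * A worked example, assuming sc_mchash_type == 0: for the all-hosts
 * IPv4 multicast address 01:00:5e:00:00:01, enaddr[4] = 0x00 and
 * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
 * wm_set_filter() below then uses hash >> 5 = 0 as the MTA register
 * index and hash & 0x1f = 16 as the bit number, i.e. bit 16 of MTA[0].
 */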
   4320 
/*
 * wm_rar_count:
 *
 *	Return the number of receive address register (RAR) entries
 *	available on this chip.
 */
   4325 static int
   4326 wm_rar_count(struct wm_softc *sc)
   4327 {
   4328 	int size;
   4329 
   4330 	switch (sc->sc_type) {
   4331 	case WM_T_ICH8:
		size = WM_RAL_TABSIZE_ICH8 - 1;
   4333 		break;
   4334 	case WM_T_ICH9:
   4335 	case WM_T_ICH10:
   4336 	case WM_T_PCH:
   4337 		size = WM_RAL_TABSIZE_ICH8;
   4338 		break;
   4339 	case WM_T_PCH2:
   4340 		size = WM_RAL_TABSIZE_PCH2;
   4341 		break;
   4342 	case WM_T_PCH_LPT:
   4343 	case WM_T_PCH_SPT:
   4344 	case WM_T_PCH_CNP:
   4345 		size = WM_RAL_TABSIZE_PCH_LPT;
   4346 		break;
   4347 	case WM_T_82575:
   4348 	case WM_T_I210:
   4349 	case WM_T_I211:
   4350 		size = WM_RAL_TABSIZE_82575;
   4351 		break;
   4352 	case WM_T_82576:
   4353 	case WM_T_82580:
   4354 		size = WM_RAL_TABSIZE_82576;
   4355 		break;
   4356 	case WM_T_I350:
   4357 	case WM_T_I354:
   4358 		size = WM_RAL_TABSIZE_I350;
   4359 		break;
   4360 	default:
   4361 		size = WM_RAL_TABSIZE;
   4362 	}
   4363 
   4364 	return size;
   4365 }
   4366 
   4367 /*
   4368  * wm_set_filter:
   4369  *
   4370  *	Set up the receive filter.
   4371  */
   4372 static void
   4373 wm_set_filter(struct wm_softc *sc)
   4374 {
   4375 	struct ethercom *ec = &sc->sc_ethercom;
   4376 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4377 	struct ether_multi *enm;
   4378 	struct ether_multistep step;
   4379 	bus_addr_t mta_reg;
   4380 	uint32_t hash, reg, bit;
   4381 	int i, size, ralmax, rv;
   4382 
   4383 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4384 		device_xname(sc->sc_dev), __func__));
   4385 	KASSERT(WM_CORE_LOCKED(sc));
   4386 
   4387 	if (sc->sc_type >= WM_T_82544)
   4388 		mta_reg = WMREG_CORDOVA_MTA;
   4389 	else
   4390 		mta_reg = WMREG_MTA;
   4391 
   4392 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4393 
   4394 	if (sc->sc_if_flags & IFF_BROADCAST)
   4395 		sc->sc_rctl |= RCTL_BAM;
   4396 	if (sc->sc_if_flags & IFF_PROMISC) {
   4397 		sc->sc_rctl |= RCTL_UPE;
   4398 		ETHER_LOCK(ec);
   4399 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4400 		ETHER_UNLOCK(ec);
   4401 		goto allmulti;
   4402 	}
   4403 
   4404 	/*
   4405 	 * Set the station address in the first RAL slot, and
   4406 	 * clear the remaining slots.
   4407 	 */
   4408 	size = wm_rar_count(sc);
   4409 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4410 
   4411 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4412 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4413 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4414 		switch (i) {
   4415 		case 0:
   4416 			/* We can use all entries */
   4417 			ralmax = size;
   4418 			break;
   4419 		case 1:
   4420 			/* Only RAR[0] */
   4421 			ralmax = 1;
   4422 			break;
   4423 		default:
   4424 			/* Available SHRA + RAR[0] */
   4425 			ralmax = i + 1;
   4426 		}
   4427 	} else
   4428 		ralmax = size;
   4429 	for (i = 1; i < size; i++) {
   4430 		if (i < ralmax)
   4431 			wm_set_ral(sc, NULL, i);
   4432 	}
   4433 
   4434 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4435 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4436 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4437 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4438 		size = WM_ICH8_MC_TABSIZE;
   4439 	else
   4440 		size = WM_MC_TABSIZE;
   4441 	/* Clear out the multicast table. */
   4442 	for (i = 0; i < size; i++) {
   4443 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4444 		CSR_WRITE_FLUSH(sc);
   4445 	}
   4446 
   4447 	ETHER_LOCK(ec);
   4448 	ETHER_FIRST_MULTI(step, ec, enm);
   4449 	while (enm != NULL) {
   4450 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4451 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4452 			ETHER_UNLOCK(ec);
   4453 			/*
   4454 			 * We must listen to a range of multicast addresses.
   4455 			 * For now, just accept all multicasts, rather than
   4456 			 * trying to set only those filter bits needed to match
   4457 			 * the range.  (At this time, the only use of address
   4458 			 * ranges is for IP multicast routing, for which the
   4459 			 * range is big enough to require all bits set.)
   4460 			 */
   4461 			goto allmulti;
   4462 		}
   4463 
   4464 		hash = wm_mchash(sc, enm->enm_addrlo);
   4465 
   4466 		reg = (hash >> 5);
   4467 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4468 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4469 		    || (sc->sc_type == WM_T_PCH2)
   4470 		    || (sc->sc_type == WM_T_PCH_LPT)
   4471 		    || (sc->sc_type == WM_T_PCH_SPT)
   4472 		    || (sc->sc_type == WM_T_PCH_CNP))
   4473 			reg &= 0x1f;
   4474 		else
   4475 			reg &= 0x7f;
   4476 		bit = hash & 0x1f;
   4477 
   4478 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4479 		hash |= 1U << bit;
   4480 
   4481 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4482 			/*
   4483 			 * 82544 Errata 9: Certain register cannot be written
   4484 			 * with particular alignments in PCI-X bus operation
   4485 			 * (FCAH, MTA and VFTA).
   4486 			 */
   4487 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4488 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4489 			CSR_WRITE_FLUSH(sc);
   4490 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4491 			CSR_WRITE_FLUSH(sc);
   4492 		} else {
   4493 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4494 			CSR_WRITE_FLUSH(sc);
   4495 		}
   4496 
   4497 		ETHER_NEXT_MULTI(step, enm);
   4498 	}
   4499 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4500 	ETHER_UNLOCK(ec);
   4501 
   4502 	goto setit;
   4503 
   4504  allmulti:
   4505 	sc->sc_rctl |= RCTL_MPE;
   4506 
   4507  setit:
   4508 	if (sc->sc_type >= WM_T_PCH2) {
   4509 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4510 		    && (ifp->if_mtu > ETHERMTU))
   4511 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4512 		else
   4513 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4514 		if (rv != 0)
   4515 			device_printf(sc->sc_dev,
   4516 			    "Failed to do workaround for jumbo frame.\n");
   4517 	}
   4518 
   4519 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4520 }
   4521 
   4522 /* Reset and init related */
   4523 
   4524 static void
   4525 wm_set_vlan(struct wm_softc *sc)
   4526 {
   4527 
   4528 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4529 		device_xname(sc->sc_dev), __func__));
   4530 
   4531 	/* Deal with VLAN enables. */
   4532 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4533 		sc->sc_ctrl |= CTRL_VME;
   4534 	else
   4535 		sc->sc_ctrl &= ~CTRL_VME;
   4536 
   4537 	/* Write the control registers. */
   4538 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4539 }
   4540 
   4541 static void
   4542 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4543 {
   4544 	uint32_t gcr;
   4545 	pcireg_t ctrl2;
   4546 
   4547 	gcr = CSR_READ(sc, WMREG_GCR);
   4548 
   4549 	/* Only take action if timeout value is defaulted to 0 */
   4550 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4551 		goto out;
   4552 
   4553 	if ((gcr & GCR_CAP_VER2) == 0) {
   4554 		gcr |= GCR_CMPL_TMOUT_10MS;
   4555 		goto out;
   4556 	}
   4557 
   4558 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4559 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4560 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4561 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4562 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4563 
   4564 out:
   4565 	/* Disable completion timeout resend */
   4566 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4567 
   4568 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4569 }
   4570 
   4571 void
   4572 wm_get_auto_rd_done(struct wm_softc *sc)
   4573 {
   4574 	int i;
   4575 
	/* Wait for eeprom to reload */
   4577 	switch (sc->sc_type) {
   4578 	case WM_T_82571:
   4579 	case WM_T_82572:
   4580 	case WM_T_82573:
   4581 	case WM_T_82574:
   4582 	case WM_T_82583:
   4583 	case WM_T_82575:
   4584 	case WM_T_82576:
   4585 	case WM_T_82580:
   4586 	case WM_T_I350:
   4587 	case WM_T_I354:
   4588 	case WM_T_I210:
   4589 	case WM_T_I211:
   4590 	case WM_T_80003:
   4591 	case WM_T_ICH8:
   4592 	case WM_T_ICH9:
   4593 		for (i = 0; i < 10; i++) {
   4594 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4595 				break;
   4596 			delay(1000);
   4597 		}
   4598 		if (i == 10) {
   4599 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4600 			    "complete\n", device_xname(sc->sc_dev));
   4601 		}
   4602 		break;
   4603 	default:
   4604 		break;
   4605 	}
   4606 }
   4607 
   4608 void
   4609 wm_lan_init_done(struct wm_softc *sc)
   4610 {
   4611 	uint32_t reg = 0;
   4612 	int i;
   4613 
   4614 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4615 		device_xname(sc->sc_dev), __func__));
   4616 
   4617 	/* Wait for eeprom to reload */
   4618 	switch (sc->sc_type) {
   4619 	case WM_T_ICH10:
   4620 	case WM_T_PCH:
   4621 	case WM_T_PCH2:
   4622 	case WM_T_PCH_LPT:
   4623 	case WM_T_PCH_SPT:
   4624 	case WM_T_PCH_CNP:
   4625 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4626 			reg = CSR_READ(sc, WMREG_STATUS);
   4627 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4628 				break;
   4629 			delay(100);
   4630 		}
   4631 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4632 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4633 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4634 		}
   4635 		break;
   4636 	default:
   4637 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4638 		    __func__);
   4639 		break;
   4640 	}
   4641 
   4642 	reg &= ~STATUS_LAN_INIT_DONE;
   4643 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4644 }
   4645 
   4646 void
   4647 wm_get_cfg_done(struct wm_softc *sc)
   4648 {
   4649 	int mask;
   4650 	uint32_t reg;
   4651 	int i;
   4652 
   4653 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4654 		device_xname(sc->sc_dev), __func__));
   4655 
   4656 	/* Wait for eeprom to reload */
   4657 	switch (sc->sc_type) {
   4658 	case WM_T_82542_2_0:
   4659 	case WM_T_82542_2_1:
   4660 		/* null */
   4661 		break;
   4662 	case WM_T_82543:
   4663 	case WM_T_82544:
   4664 	case WM_T_82540:
   4665 	case WM_T_82545:
   4666 	case WM_T_82545_3:
   4667 	case WM_T_82546:
   4668 	case WM_T_82546_3:
   4669 	case WM_T_82541:
   4670 	case WM_T_82541_2:
   4671 	case WM_T_82547:
   4672 	case WM_T_82547_2:
   4673 	case WM_T_82573:
   4674 	case WM_T_82574:
   4675 	case WM_T_82583:
   4676 		/* generic */
   4677 		delay(10*1000);
   4678 		break;
   4679 	case WM_T_80003:
   4680 	case WM_T_82571:
   4681 	case WM_T_82572:
   4682 	case WM_T_82575:
   4683 	case WM_T_82576:
   4684 	case WM_T_82580:
   4685 	case WM_T_I350:
   4686 	case WM_T_I354:
   4687 	case WM_T_I210:
   4688 	case WM_T_I211:
   4689 		if (sc->sc_type == WM_T_82571) {
			/* Only 82571 shares port 0's config-done bit */
   4691 			mask = EEMNGCTL_CFGDONE_0;
   4692 		} else
   4693 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4694 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4695 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4696 				break;
   4697 			delay(1000);
   4698 		}
   4699 		if (i >= WM_PHY_CFG_TIMEOUT)
   4700 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4701 				device_xname(sc->sc_dev), __func__));
   4702 		break;
   4703 	case WM_T_ICH8:
   4704 	case WM_T_ICH9:
   4705 	case WM_T_ICH10:
   4706 	case WM_T_PCH:
   4707 	case WM_T_PCH2:
   4708 	case WM_T_PCH_LPT:
   4709 	case WM_T_PCH_SPT:
   4710 	case WM_T_PCH_CNP:
   4711 		delay(10*1000);
   4712 		if (sc->sc_type >= WM_T_ICH10)
   4713 			wm_lan_init_done(sc);
   4714 		else
   4715 			wm_get_auto_rd_done(sc);
   4716 
   4717 		/* Clear PHY Reset Asserted bit */
   4718 		reg = CSR_READ(sc, WMREG_STATUS);
   4719 		if ((reg & STATUS_PHYRA) != 0)
   4720 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4721 		break;
   4722 	default:
   4723 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4724 		    __func__);
   4725 		break;
   4726 	}
   4727 }
   4728 
   4729 int
   4730 wm_phy_post_reset(struct wm_softc *sc)
   4731 {
   4732 	device_t dev = sc->sc_dev;
   4733 	uint16_t reg;
   4734 	int rv = 0;
   4735 
   4736 	/* This function is only for ICH8 and newer. */
   4737 	if (sc->sc_type < WM_T_ICH8)
   4738 		return 0;
   4739 
   4740 	if (wm_phy_resetisblocked(sc)) {
   4741 		/* XXX */
   4742 		device_printf(dev, "PHY is blocked\n");
   4743 		return -1;
   4744 	}
   4745 
   4746 	/* Allow time for h/w to get to quiescent state after reset */
   4747 	delay(10*1000);
   4748 
   4749 	/* Perform any necessary post-reset workarounds */
   4750 	if (sc->sc_type == WM_T_PCH)
   4751 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4752 	else if (sc->sc_type == WM_T_PCH2)
   4753 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4754 	if (rv != 0)
   4755 		return rv;
   4756 
   4757 	/* Clear the host wakeup bit after lcd reset */
   4758 	if (sc->sc_type >= WM_T_PCH) {
   4759 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4760 		reg &= ~BM_WUC_HOST_WU_BIT;
   4761 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4762 	}
   4763 
   4764 	/* Configure the LCD with the extended configuration region in NVM */
   4765 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4766 		return rv;
   4767 
   4768 	/* Configure the LCD with the OEM bits in NVM */
   4769 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4770 
   4771 	if (sc->sc_type == WM_T_PCH2) {
   4772 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4773 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4774 			delay(10 * 1000);
   4775 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4776 		}
   4777 		/* Set EEE LPI Update Timer to 200usec */
   4778 		rv = sc->phy.acquire(sc);
   4779 		if (rv)
   4780 			return rv;
   4781 		rv = wm_write_emi_reg_locked(dev,
   4782 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4783 		sc->phy.release(sc);
   4784 	}
   4785 
   4786 	return rv;
   4787 }
   4788 
   4789 /* Only for PCH and newer */
   4790 static int
   4791 wm_write_smbus_addr(struct wm_softc *sc)
   4792 {
   4793 	uint32_t strap, freq;
   4794 	uint16_t phy_data;
   4795 	int rv;
   4796 
   4797 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4798 		device_xname(sc->sc_dev), __func__));
   4799 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4800 
   4801 	strap = CSR_READ(sc, WMREG_STRAP);
   4802 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4803 
   4804 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4805 	if (rv != 0)
   4806 		return rv;
   4807 
   4808 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4809 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4810 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4811 
   4812 	if (sc->sc_phytype == WMPHY_I217) {
   4813 		/* Restore SMBus frequency */
		if (freq--) {
   4815 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4816 			    | HV_SMB_ADDR_FREQ_HIGH);
   4817 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4818 			    HV_SMB_ADDR_FREQ_LOW);
   4819 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4820 			    HV_SMB_ADDR_FREQ_HIGH);
   4821 		} else
   4822 			DPRINTF(sc, WM_DEBUG_INIT,
   4823 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4824 				device_xname(sc->sc_dev), __func__));
   4825 	}
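
	/*
	 * Note: freq was post-decremented above, so the two FREQ bits
	 * written to the PHY encode (strap frequency value - 1); a strap
	 * value of 0 means the frequency is unsupported.
	 */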
   4826 
   4827 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4828 	    phy_data);
   4829 }
   4830 
   4831 static int
   4832 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4833 {
   4834 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4835 	uint16_t phy_page = 0;
   4836 	int rv = 0;
   4837 
   4838 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4839 		device_xname(sc->sc_dev), __func__));
   4840 
   4841 	switch (sc->sc_type) {
   4842 	case WM_T_ICH8:
   4843 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4844 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4845 			return 0;
   4846 
   4847 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4848 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4849 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4850 			break;
   4851 		}
   4852 		/* FALLTHROUGH */
   4853 	case WM_T_PCH:
   4854 	case WM_T_PCH2:
   4855 	case WM_T_PCH_LPT:
   4856 	case WM_T_PCH_SPT:
   4857 	case WM_T_PCH_CNP:
   4858 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4859 		break;
   4860 	default:
   4861 		return 0;
   4862 	}
   4863 
   4864 	if ((rv = sc->phy.acquire(sc)) != 0)
   4865 		return rv;
   4866 
   4867 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4868 	if ((reg & sw_cfg_mask) == 0)
   4869 		goto release;
   4870 
   4871 	/*
   4872 	 * Make sure HW does not configure LCD from PHY extended configuration
   4873 	 * before SW configuration
   4874 	 */
   4875 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4876 	if ((sc->sc_type < WM_T_PCH2)
   4877 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4878 		goto release;
   4879 
   4880 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4881 		device_xname(sc->sc_dev), __func__));
   4882 	/* word_addr is in DWORD */
   4883 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4884 
   4885 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4886 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4887 	if (cnf_size == 0)
   4888 		goto release;
   4889 
   4890 	if (((sc->sc_type == WM_T_PCH)
   4891 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4892 	    || (sc->sc_type > WM_T_PCH)) {
   4893 		/*
   4894 		 * HW configures the SMBus address and LEDs when the OEM and
   4895 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4896 		 * are cleared, SW will configure them instead.
   4897 		 */
   4898 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4899 			device_xname(sc->sc_dev), __func__));
   4900 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4901 			goto release;
   4902 
   4903 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4904 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4905 		    (uint16_t)reg);
   4906 		if (rv != 0)
   4907 			goto release;
   4908 	}
   4909 
   4910 	/* Configure LCD from extended configuration region. */
   4911 	for (i = 0; i < cnf_size; i++) {
   4912 		uint16_t reg_data, reg_addr;
   4913 
   4914 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4915 			goto release;
   4916 
   4917 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4918 			goto release;
   4919 
   4920 		if (reg_addr == IGPHY_PAGE_SELECT)
   4921 			phy_page = reg_data;
   4922 
   4923 		reg_addr &= IGPHY_MAXREGADDR;
   4924 		reg_addr |= phy_page;
   4925 
   4926 		KASSERT(sc->phy.writereg_locked != NULL);
   4927 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4928 		    reg_data);
   4929 	}
   4930 
   4931 release:
   4932 	sc->phy.release(sc);
   4933 	return rv;
   4934 }
   4935 
   4936 /*
   4937  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4938  *  @sc:       pointer to the HW structure
 *  @d0_state: true if entering D0, false if entering D3
   4940  *
   4941  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4942  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4944  */
   4945 int
   4946 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4947 {
   4948 	uint32_t mac_reg;
   4949 	uint16_t oem_reg;
   4950 	int rv;
   4951 
   4952 	if (sc->sc_type < WM_T_PCH)
   4953 		return 0;
   4954 
   4955 	rv = sc->phy.acquire(sc);
   4956 	if (rv != 0)
   4957 		return rv;
   4958 
   4959 	if (sc->sc_type == WM_T_PCH) {
   4960 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4961 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4962 			goto release;
   4963 	}
   4964 
   4965 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4966 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4967 		goto release;
   4968 
   4969 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4970 
   4971 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4972 	if (rv != 0)
   4973 		goto release;
   4974 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4975 
   4976 	if (d0_state) {
   4977 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4978 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4979 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4980 			oem_reg |= HV_OEM_BITS_LPLU;
   4981 	} else {
   4982 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4983 		    != 0)
   4984 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4985 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4986 		    != 0)
   4987 			oem_reg |= HV_OEM_BITS_LPLU;
   4988 	}
   4989 
   4990 	/* Set Restart auto-neg to activate the bits */
   4991 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4992 	    && (wm_phy_resetisblocked(sc) == false))
   4993 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4994 
   4995 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4996 
   4997 release:
   4998 	sc->phy.release(sc);
   4999 
   5000 	return rv;
   5001 }
   5002 
   5003 /* Init hardware bits */
   5004 void
   5005 wm_initialize_hardware_bits(struct wm_softc *sc)
   5006 {
   5007 	uint32_t tarc0, tarc1, reg;
   5008 
   5009 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5010 		device_xname(sc->sc_dev), __func__));
   5011 
	/* For 82571 variants, 80003 and ICHs */
   5013 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   5014 	    || (sc->sc_type >= WM_T_80003)) {
   5015 
   5016 		/* Transmit Descriptor Control 0 */
   5017 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   5018 		reg |= TXDCTL_COUNT_DESC;
   5019 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   5020 
   5021 		/* Transmit Descriptor Control 1 */
   5022 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5023 		reg |= TXDCTL_COUNT_DESC;
   5024 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5025 
   5026 		/* TARC0 */
   5027 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5028 		switch (sc->sc_type) {
   5029 		case WM_T_82571:
   5030 		case WM_T_82572:
   5031 		case WM_T_82573:
   5032 		case WM_T_82574:
   5033 		case WM_T_82583:
   5034 		case WM_T_80003:
   5035 			/* Clear bits 30..27 */
   5036 			tarc0 &= ~__BITS(30, 27);
   5037 			break;
   5038 		default:
   5039 			break;
   5040 		}
   5041 
   5042 		switch (sc->sc_type) {
   5043 		case WM_T_82571:
   5044 		case WM_T_82572:
   5045 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5046 
   5047 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5048 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5049 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5050 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5052 
   5053 			/* TARC1 bit 28 */
   5054 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5055 				tarc1 &= ~__BIT(28);
   5056 			else
   5057 				tarc1 |= __BIT(28);
   5058 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5059 
   5060 			/*
   5061 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   5063 			 */
   5064 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5065 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5066 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5067 			break;
   5068 		case WM_T_82573:
   5069 		case WM_T_82574:
   5070 		case WM_T_82583:
   5071 			if ((sc->sc_type == WM_T_82574)
   5072 			    || (sc->sc_type == WM_T_82583))
   5073 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5074 
   5075 			/* Extended Device Control */
   5076 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5077 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5078 			reg |= __BIT(22);	/* Set bit 22 */
   5079 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5080 
   5081 			/* Device Control */
   5082 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5083 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5084 
   5085 			/* PCIe Control Register */
   5086 			/*
   5087 			 * 82573 Errata (unknown).
   5088 			 *
   5089 			 * 82574 Errata 25 and 82583 Errata 12
   5090 			 * "Dropped Rx Packets":
			 *   NVM image version 2.1.4 and newer does not
			 *   have this bug.
   5092 			 */
   5093 			reg = CSR_READ(sc, WMREG_GCR);
   5094 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5095 			CSR_WRITE(sc, WMREG_GCR, reg);
   5096 
   5097 			if ((sc->sc_type == WM_T_82574)
   5098 			    || (sc->sc_type == WM_T_82583)) {
   5099 				/*
   5100 				 * Document says this bit must be set for
   5101 				 * proper operation.
   5102 				 */
   5103 				reg = CSR_READ(sc, WMREG_GCR);
   5104 				reg |= __BIT(22);
   5105 				CSR_WRITE(sc, WMREG_GCR, reg);
   5106 
				 * Apply a workaround for the hardware
				 * errata documented in the errata docs.
				 * It fixes an issue where some error-prone
				 * or unreliable PCIe completions occur,
				 * particularly with ASPM enabled.  Without
				 * the fix, the issue can cause Tx timeouts.
   5113 				 * cause Tx timeouts.
   5114 				 */
   5115 				reg = CSR_READ(sc, WMREG_GCR2);
   5116 				reg |= __BIT(0);
   5117 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5118 			}
   5119 			break;
   5120 		case WM_T_80003:
   5121 			/* TARC0 */
   5122 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5123 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5125 
   5126 			/* TARC1 bit 28 */
   5127 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5128 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5129 				tarc1 &= ~__BIT(28);
   5130 			else
   5131 				tarc1 |= __BIT(28);
   5132 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5133 			break;
   5134 		case WM_T_ICH8:
   5135 		case WM_T_ICH9:
   5136 		case WM_T_ICH10:
   5137 		case WM_T_PCH:
   5138 		case WM_T_PCH2:
   5139 		case WM_T_PCH_LPT:
   5140 		case WM_T_PCH_SPT:
   5141 		case WM_T_PCH_CNP:
   5142 			/* TARC0 */
   5143 			if (sc->sc_type == WM_T_ICH8) {
   5144 				/* Set TARC0 bits 29 and 28 */
   5145 				tarc0 |= __BITS(29, 28);
   5146 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5147 				tarc0 |= __BIT(29);
   5148 				/*
   5149 				 *  Drop bit 28. From Linux.
   5150 				 * See I218/I219 spec update
   5151 				 * "5. Buffer Overrun While the I219 is
   5152 				 * Processing DMA Transactions"
   5153 				 */
   5154 				tarc0 &= ~__BIT(28);
   5155 			}
   5156 			/* Set TARC0 bits 23,24,26,27 */
   5157 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5158 
   5159 			/* CTRL_EXT */
   5160 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5161 			reg |= __BIT(22);	/* Set bit 22 */
   5162 			/*
   5163 			 * Enable PHY low-power state when MAC is at D3
   5164 			 * w/o WoL
   5165 			 */
   5166 			if (sc->sc_type >= WM_T_PCH)
   5167 				reg |= CTRL_EXT_PHYPDEN;
   5168 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5169 
   5170 			/* TARC1 */
   5171 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5172 			/* bit 28 */
   5173 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5174 				tarc1 &= ~__BIT(28);
   5175 			else
   5176 				tarc1 |= __BIT(28);
   5177 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5178 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5179 
   5180 			/* Device Status */
   5181 			if (sc->sc_type == WM_T_ICH8) {
   5182 				reg = CSR_READ(sc, WMREG_STATUS);
   5183 				reg &= ~__BIT(31);
   5184 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5185 
   5186 			}
   5187 
   5188 			/* IOSFPC */
   5189 			if (sc->sc_type == WM_T_PCH_SPT) {
   5190 				reg = CSR_READ(sc, WMREG_IOSFPC);
				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5192 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5193 			}
   5194 			/*
			 * To work around a descriptor data corruption issue
			 * during NFS v2 UDP traffic, just disable the NFS
			 * filtering capability.
   5198 			 */
   5199 			reg = CSR_READ(sc, WMREG_RFCTL);
   5200 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5201 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5202 			break;
   5203 		default:
   5204 			break;
   5205 		}
   5206 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5207 
   5208 		switch (sc->sc_type) {
   5209 		/*
   5210 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   5211 		 * Avoid RSS Hash Value bug.
   5212 		 */
   5213 		case WM_T_82571:
   5214 		case WM_T_82572:
   5215 		case WM_T_82573:
   5216 		case WM_T_80003:
   5217 		case WM_T_ICH8:
   5218 			reg = CSR_READ(sc, WMREG_RFCTL);
   5219 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   5220 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5221 			break;
   5222 		case WM_T_82574:
			/* Use extended Rx descriptors. */
   5224 			reg = CSR_READ(sc, WMREG_RFCTL);
   5225 			reg |= WMREG_RFCTL_EXSTEN;
   5226 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5227 			break;
   5228 		default:
   5229 			break;
   5230 		}
   5231 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5232 		/*
   5233 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5234 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5235 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5236 		 * Correctly by the Device"
   5237 		 *
   5238 		 * I354(C2000) Errata AVR53:
   5239 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5240 		 * Hang"
   5241 		 */
   5242 		reg = CSR_READ(sc, WMREG_RFCTL);
   5243 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5244 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5245 	}
   5246 }
   5247 
   5248 static uint32_t
   5249 wm_rxpbs_adjust_82580(uint32_t val)
   5250 {
   5251 	uint32_t rv = 0;
   5252 
   5253 	if (val < __arraycount(wm_82580_rxpbs_table))
   5254 		rv = wm_82580_rxpbs_table[val];
   5255 
   5256 	return rv;
   5257 }
   5258 
   5259 /*
   5260  * wm_reset_phy:
   5261  *
   5262  *	generic PHY reset function.
   5263  *	Same as e1000_phy_hw_reset_generic()
   5264  */
   5265 static int
   5266 wm_reset_phy(struct wm_softc *sc)
   5267 {
   5268 	uint32_t reg;
   5269 
   5270 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5271 		device_xname(sc->sc_dev), __func__));
   5272 	if (wm_phy_resetisblocked(sc))
   5273 		return -1;
   5274 
   5275 	sc->phy.acquire(sc);
   5276 
   5277 	reg = CSR_READ(sc, WMREG_CTRL);
   5278 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5279 	CSR_WRITE_FLUSH(sc);
   5280 
   5281 	delay(sc->phy.reset_delay_us);
   5282 
   5283 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5284 	CSR_WRITE_FLUSH(sc);
   5285 
   5286 	delay(150);
   5287 
   5288 	sc->phy.release(sc);
   5289 
   5290 	wm_get_cfg_done(sc);
   5291 	wm_phy_post_reset(sc);
   5292 
   5293 	return 0;
   5294 }
   5295 
   5296 /*
   5297  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5298  *
   5299  * In i219, the descriptor rings must be emptied before resetting the HW
   5300  * or before changing the device state to D3 during runtime (runtime PM).
   5301  *
   5302  * Failure to do this will cause the HW to enter a unit hang state which can
   5303  * only be released by PCI reset on the device.
   5304  *
   5305  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5306  */
   5307 static void
   5308 wm_flush_desc_rings(struct wm_softc *sc)
   5309 {
   5310 	pcireg_t preg;
   5311 	uint32_t reg;
   5312 	struct wm_txqueue *txq;
   5313 	wiseman_txdesc_t *txd;
   5314 	int nexttx;
   5315 	uint32_t rctl;
   5316 
   5317 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5318 
   5319 	/* First, disable MULR fix in FEXTNVM11 */
   5320 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5321 	reg |= FEXTNVM11_DIS_MULRFIX;
   5322 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5323 
   5324 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5325 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5326 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5327 		return;
   5328 
   5329 	/*
   5330 	 * Remove all descriptors from the tx_ring.
   5331 	 *
   5332 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5333 	 * happens when the HW reads the regs. We assign the ring itself as
	 * the data of the next descriptor. We don't care about the data;
	 * we are about to reset the HW anyway.
   5336 	 */
   5337 #ifdef WM_DEBUG
   5338 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5339 #endif
   5340 	reg = CSR_READ(sc, WMREG_TCTL);
   5341 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5342 
   5343 	txq = &sc->sc_queue[0].wmq_txq;
   5344 	nexttx = txq->txq_next;
   5345 	txd = &txq->txq_descs[nexttx];
   5346 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5347 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5348 	txd->wtx_fields.wtxu_status = 0;
   5349 	txd->wtx_fields.wtxu_options = 0;
   5350 	txd->wtx_fields.wtxu_vlan = 0;
   5351 
   5352 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5353 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5354 
   5355 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5356 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5357 	CSR_WRITE_FLUSH(sc);
   5358 	delay(250);
   5359 
   5360 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5361 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5362 		return;
   5363 
   5364 	/*
   5365 	 * Mark all descriptors in the RX ring as consumed and disable the
   5366 	 * rx ring.
   5367 	 */
   5368 #ifdef WM_DEBUG
   5369 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5370 #endif
   5371 	rctl = CSR_READ(sc, WMREG_RCTL);
   5372 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5373 	CSR_WRITE_FLUSH(sc);
   5374 	delay(150);
   5375 
   5376 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5377 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5378 	reg &= 0xffffc000;
   5379 	/*
   5380 	 * Update thresholds: prefetch threshold to 31, host threshold
   5381 	 * to 1 and make sure the granularity is "descriptors" and not
   5382 	 * "cache lines"
   5383 	 */
   5384 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
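	/*
	 * (PTHRESH lives in RXDCTL bits 5:0 and HTHRESH in bits 13:8,
	 * hence the 0xffffc000 mask above that clears the low 14 bits.)
	 */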
   5385 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5386 
   5387 	/* Momentarily enable the RX ring for the changes to take effect */
   5388 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5389 	CSR_WRITE_FLUSH(sc);
   5390 	delay(150);
   5391 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5392 }
   5393 
   5394 /*
   5395  * wm_reset:
   5396  *
   5397  *	Reset the i82542 chip.
   5398  */
   5399 static void
   5400 wm_reset(struct wm_softc *sc)
   5401 {
   5402 	int phy_reset = 0;
   5403 	int i, error = 0;
   5404 	uint32_t reg;
   5405 	uint16_t kmreg;
   5406 	int rv;
   5407 
   5408 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5409 		device_xname(sc->sc_dev), __func__));
   5410 	KASSERT(sc->sc_type != 0);
   5411 
   5412 	/*
   5413 	 * Allocate on-chip memory according to the MTU size.
   5414 	 * The Packet Buffer Allocation register must be written
   5415 	 * before the chip is reset.
   5416 	 */
   5417 	switch (sc->sc_type) {
   5418 	case WM_T_82547:
   5419 	case WM_T_82547_2:
   5420 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5421 		    PBA_22K : PBA_30K;
   5422 		for (i = 0; i < sc->sc_nqueues; i++) {
   5423 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5424 			txq->txq_fifo_head = 0;
   5425 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5426 			txq->txq_fifo_size =
   5427 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5428 			txq->txq_fifo_stall = 0;
   5429 		}
   5430 		break;
   5431 	case WM_T_82571:
   5432 	case WM_T_82572:
   5433 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   5434 	case WM_T_80003:
   5435 		sc->sc_pba = PBA_32K;
   5436 		break;
   5437 	case WM_T_82573:
   5438 		sc->sc_pba = PBA_12K;
   5439 		break;
   5440 	case WM_T_82574:
   5441 	case WM_T_82583:
   5442 		sc->sc_pba = PBA_20K;
   5443 		break;
   5444 	case WM_T_82576:
   5445 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5446 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5447 		break;
   5448 	case WM_T_82580:
   5449 	case WM_T_I350:
   5450 	case WM_T_I354:
   5451 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5452 		break;
   5453 	case WM_T_I210:
   5454 	case WM_T_I211:
   5455 		sc->sc_pba = PBA_34K;
   5456 		break;
   5457 	case WM_T_ICH8:
   5458 		/* Workaround for a bit corruption issue in FIFO memory */
   5459 		sc->sc_pba = PBA_8K;
   5460 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5461 		break;
   5462 	case WM_T_ICH9:
   5463 	case WM_T_ICH10:
   5464 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5465 		    PBA_14K : PBA_10K;
   5466 		break;
   5467 	case WM_T_PCH:
   5468 	case WM_T_PCH2:	/* XXX 14K? */
   5469 	case WM_T_PCH_LPT:
   5470 	case WM_T_PCH_SPT:
   5471 	case WM_T_PCH_CNP:
   5472 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5473 		    PBA_12K : PBA_26K;
   5474 		break;
   5475 	default:
   5476 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5477 		    PBA_40K : PBA_48K;
   5478 		break;
   5479 	}
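	/*
	 * For instance, on an 82547 with the default MTU the switch above
	 * selects PBA_30K: 30KB of the 40KB packet buffer is allocated to
	 * Rx and the remaining 10KB (PBA_40K - sc_pba) is used as the Tx
	 * FIFO set up in the loop above.
	 */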
   5480 	/*
   5481 	 * Only old or non-multiqueue devices have the PBA register
   5482 	 * XXX Need special handling for 82575.
   5483 	 */
   5484 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5485 	    || (sc->sc_type == WM_T_82575))
   5486 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5487 
   5488 	/* Prevent the PCI-E bus from sticking */
   5489 	if (sc->sc_flags & WM_F_PCIE) {
   5490 		int timeout = 800;
   5491 
   5492 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5493 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5494 
   5495 		while (timeout--) {
   5496 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5497 			    == 0)
   5498 				break;
   5499 			delay(100);
   5500 		}
   5501 		if (timeout == 0)
   5502 			device_printf(sc->sc_dev,
   5503 			    "failed to disable bus mastering\n");
   5504 	}
   5505 
   5506 	/* Set the completion timeout for interface */
   5507 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5508 	    || (sc->sc_type == WM_T_82580)
   5509 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5510 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5511 		wm_set_pcie_completion_timeout(sc);
   5512 
   5513 	/* Clear interrupt */
   5514 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5515 	if (wm_is_using_msix(sc)) {
   5516 		if (sc->sc_type != WM_T_82574) {
   5517 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5518 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5519 		} else
   5520 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5521 	}
   5522 
   5523 	/* Stop the transmit and receive processes. */
   5524 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5525 	sc->sc_rctl &= ~RCTL_EN;
   5526 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5527 	CSR_WRITE_FLUSH(sc);
   5528 
   5529 	/* XXX set_tbi_sbp_82543() */
   5530 
   5531 	delay(10*1000);
   5532 
   5533 	/* Must acquire the MDIO ownership before MAC reset */
   5534 	switch (sc->sc_type) {
   5535 	case WM_T_82573:
   5536 	case WM_T_82574:
   5537 	case WM_T_82583:
   5538 		error = wm_get_hw_semaphore_82573(sc);
   5539 		break;
   5540 	default:
   5541 		break;
   5542 	}
   5543 
   5544 	/*
   5545 	 * 82541 Errata 29? & 82547 Errata 28?
   5546 	 * See also the description about PHY_RST bit in CTRL register
   5547 	 * in 8254x_GBe_SDM.pdf.
   5548 	 */
   5549 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5550 		CSR_WRITE(sc, WMREG_CTRL,
   5551 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5552 		CSR_WRITE_FLUSH(sc);
   5553 		delay(5000);
   5554 	}
   5555 
   5556 	switch (sc->sc_type) {
   5557 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5558 	case WM_T_82541:
   5559 	case WM_T_82541_2:
   5560 	case WM_T_82547:
   5561 	case WM_T_82547_2:
   5562 		/*
   5563 		 * On some chipsets, a reset through a memory-mapped write
   5564 		 * cycle can cause the chip to reset before completing the
		 * write cycle. This causes major headaches that can be avoided
   5566 		 * by issuing the reset via indirect register writes through
   5567 		 * I/O space.
   5568 		 *
   5569 		 * So, if we successfully mapped the I/O BAR at attach time,
   5570 		 * use that. Otherwise, try our luck with a memory-mapped
   5571 		 * reset.
   5572 		 */
   5573 		if (sc->sc_flags & WM_F_IOH_VALID)
   5574 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5575 		else
   5576 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5577 		break;
   5578 	case WM_T_82545_3:
   5579 	case WM_T_82546_3:
   5580 		/* Use the shadow control register on these chips. */
   5581 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5582 		break;
   5583 	case WM_T_80003:
   5584 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5585 		sc->phy.acquire(sc);
   5586 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5587 		sc->phy.release(sc);
   5588 		break;
   5589 	case WM_T_ICH8:
   5590 	case WM_T_ICH9:
   5591 	case WM_T_ICH10:
   5592 	case WM_T_PCH:
   5593 	case WM_T_PCH2:
   5594 	case WM_T_PCH_LPT:
   5595 	case WM_T_PCH_SPT:
   5596 	case WM_T_PCH_CNP:
   5597 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5598 		if (wm_phy_resetisblocked(sc) == false) {
   5599 			/*
   5600 			 * Gate automatic PHY configuration by hardware on
   5601 			 * non-managed 82579
   5602 			 */
   5603 			if ((sc->sc_type == WM_T_PCH2)
   5604 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5605 				== 0))
   5606 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5607 
   5608 			reg |= CTRL_PHY_RESET;
   5609 			phy_reset = 1;
   5610 		} else
   5611 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5612 		sc->phy.acquire(sc);
   5613 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset */
   5615 		delay(20*1000);
   5616 		mutex_exit(sc->sc_ich_phymtx);
   5617 		break;
   5618 	case WM_T_82580:
   5619 	case WM_T_I350:
   5620 	case WM_T_I354:
   5621 	case WM_T_I210:
   5622 	case WM_T_I211:
   5623 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5624 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5625 			CSR_WRITE_FLUSH(sc);
   5626 		delay(5000);
   5627 		break;
   5628 	case WM_T_82542_2_0:
   5629 	case WM_T_82542_2_1:
   5630 	case WM_T_82543:
   5631 	case WM_T_82540:
   5632 	case WM_T_82545:
   5633 	case WM_T_82546:
   5634 	case WM_T_82571:
   5635 	case WM_T_82572:
   5636 	case WM_T_82573:
   5637 	case WM_T_82574:
   5638 	case WM_T_82575:
   5639 	case WM_T_82576:
   5640 	case WM_T_82583:
   5641 	default:
   5642 		/* Everything else can safely use the documented method. */
   5643 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5644 		break;
   5645 	}
   5646 
   5647 	/* Must release the MDIO ownership after MAC reset */
   5648 	switch (sc->sc_type) {
   5649 	case WM_T_82573:
   5650 	case WM_T_82574:
   5651 	case WM_T_82583:
   5652 		if (error == 0)
   5653 			wm_put_hw_semaphore_82573(sc);
   5654 		break;
   5655 	default:
   5656 		break;
   5657 	}
   5658 
   5659 	/* Set Phy Config Counter to 50msec */
   5660 	if (sc->sc_type == WM_T_PCH2) {
   5661 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5662 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5663 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5664 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5665 	}
   5666 
   5667 	if (phy_reset != 0)
   5668 		wm_get_cfg_done(sc);
   5669 
   5670 	/* Reload EEPROM */
   5671 	switch (sc->sc_type) {
   5672 	case WM_T_82542_2_0:
   5673 	case WM_T_82542_2_1:
   5674 	case WM_T_82543:
   5675 	case WM_T_82544:
   5676 		delay(10);
   5677 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5678 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5679 		CSR_WRITE_FLUSH(sc);
   5680 		delay(2000);
   5681 		break;
   5682 	case WM_T_82540:
   5683 	case WM_T_82545:
   5684 	case WM_T_82545_3:
   5685 	case WM_T_82546:
   5686 	case WM_T_82546_3:
   5687 		delay(5*1000);
   5688 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5689 		break;
   5690 	case WM_T_82541:
   5691 	case WM_T_82541_2:
   5692 	case WM_T_82547:
   5693 	case WM_T_82547_2:
   5694 		delay(20000);
   5695 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5696 		break;
   5697 	case WM_T_82571:
   5698 	case WM_T_82572:
   5699 	case WM_T_82573:
   5700 	case WM_T_82574:
   5701 	case WM_T_82583:
   5702 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5703 			delay(10);
   5704 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5705 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5706 			CSR_WRITE_FLUSH(sc);
   5707 		}
   5708 		/* check EECD_EE_AUTORD */
   5709 		wm_get_auto_rd_done(sc);
   5710 		/*
   5711 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5712 		 * is set.
   5713 		 */
   5714 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5715 		    || (sc->sc_type == WM_T_82583))
   5716 			delay(25*1000);
   5717 		break;
   5718 	case WM_T_82575:
   5719 	case WM_T_82576:
   5720 	case WM_T_82580:
   5721 	case WM_T_I350:
   5722 	case WM_T_I354:
   5723 	case WM_T_I210:
   5724 	case WM_T_I211:
   5725 	case WM_T_80003:
   5726 		/* check EECD_EE_AUTORD */
   5727 		wm_get_auto_rd_done(sc);
   5728 		break;
   5729 	case WM_T_ICH8:
   5730 	case WM_T_ICH9:
   5731 	case WM_T_ICH10:
   5732 	case WM_T_PCH:
   5733 	case WM_T_PCH2:
   5734 	case WM_T_PCH_LPT:
   5735 	case WM_T_PCH_SPT:
   5736 	case WM_T_PCH_CNP:
   5737 		break;
   5738 	default:
   5739 		panic("%s: unknown type\n", __func__);
   5740 	}
   5741 
   5742 	/* Check whether EEPROM is present or not */
   5743 	switch (sc->sc_type) {
   5744 	case WM_T_82575:
   5745 	case WM_T_82576:
   5746 	case WM_T_82580:
   5747 	case WM_T_I350:
   5748 	case WM_T_I354:
   5749 	case WM_T_ICH8:
   5750 	case WM_T_ICH9:
   5751 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5752 			/* Not found */
   5753 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5754 			if (sc->sc_type == WM_T_82575)
   5755 				wm_reset_init_script_82575(sc);
   5756 		}
   5757 		break;
   5758 	default:
   5759 		break;
   5760 	}
   5761 
   5762 	if (phy_reset != 0)
   5763 		wm_phy_post_reset(sc);
   5764 
   5765 	if ((sc->sc_type == WM_T_82580)
   5766 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5767 		/* Clear global device reset status bit */
   5768 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5769 	}
   5770 
   5771 	/* Clear any pending interrupt events. */
   5772 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5773 	reg = CSR_READ(sc, WMREG_ICR);
   5774 	if (wm_is_using_msix(sc)) {
   5775 		if (sc->sc_type != WM_T_82574) {
   5776 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5777 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5778 		} else
   5779 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5780 	}
   5781 
   5782 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5783 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5784 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5785 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5786 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5787 		reg |= KABGTXD_BGSQLBIAS;
   5788 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5789 	}
   5790 
   5791 	/* Reload sc_ctrl */
   5792 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5793 
   5794 	wm_set_eee(sc);
   5795 
   5796 	/*
   5797 	 * For PCH, this write will make sure that any noise will be detected
   5798 	 * as a CRC error and be dropped rather than show up as a bad packet
   5799 	 * to the DMA engine
   5800 	 */
   5801 	if (sc->sc_type == WM_T_PCH)
   5802 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5803 
   5804 	if (sc->sc_type >= WM_T_82544)
   5805 		CSR_WRITE(sc, WMREG_WUC, 0);
   5806 
   5807 	if (sc->sc_type < WM_T_82575)
   5808 		wm_disable_aspm(sc); /* Workaround for some chips */
   5809 
   5810 	wm_reset_mdicnfg_82580(sc);
   5811 
   5812 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5813 		wm_pll_workaround_i210(sc);
   5814 
   5815 	if (sc->sc_type == WM_T_80003) {
   5816 		/* Default to TRUE to enable the MDIC W/A */
   5817 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5818 
   5819 		rv = wm_kmrn_readreg(sc,
   5820 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5821 		if (rv == 0) {
   5822 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5823 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5824 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5825 			else
   5826 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5827 		}
   5828 	}
   5829 }
   5830 
   5831 /*
   5832  * wm_add_rxbuf:
   5833  *
 *	Add a receive buffer to the indicated descriptor.
   5835  */
   5836 static int
   5837 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5838 {
   5839 	struct wm_softc *sc = rxq->rxq_sc;
   5840 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5841 	struct mbuf *m;
   5842 	int error;
   5843 
   5844 	KASSERT(mutex_owned(rxq->rxq_lock));
   5845 
   5846 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5847 	if (m == NULL)
   5848 		return ENOBUFS;
   5849 
   5850 	MCLGET(m, M_DONTWAIT);
   5851 	if ((m->m_flags & M_EXT) == 0) {
   5852 		m_freem(m);
   5853 		return ENOBUFS;
   5854 	}
   5855 
   5856 	if (rxs->rxs_mbuf != NULL)
   5857 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5858 
   5859 	rxs->rxs_mbuf = m;
   5860 
   5861 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5862 	/*
   5863 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5864 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5865 	 */
   5866 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5867 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5868 	if (error) {
   5869 		/* XXX XXX XXX */
   5870 		aprint_error_dev(sc->sc_dev,
   5871 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5872 		panic("wm_add_rxbuf");
   5873 	}
   5874 
   5875 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5876 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5877 
   5878 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5879 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5880 			wm_init_rxdesc(rxq, idx);
   5881 	} else
   5882 		wm_init_rxdesc(rxq, idx);
   5883 
   5884 	return 0;
   5885 }
   5886 
   5887 /*
   5888  * wm_rxdrain:
   5889  *
   5890  *	Drain the receive queue.
   5891  */
   5892 static void
   5893 wm_rxdrain(struct wm_rxqueue *rxq)
   5894 {
   5895 	struct wm_softc *sc = rxq->rxq_sc;
   5896 	struct wm_rxsoft *rxs;
   5897 	int i;
   5898 
   5899 	KASSERT(mutex_owned(rxq->rxq_lock));
   5900 
   5901 	for (i = 0; i < WM_NRXDESC; i++) {
   5902 		rxs = &rxq->rxq_soft[i];
   5903 		if (rxs->rxs_mbuf != NULL) {
   5904 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5905 			m_freem(rxs->rxs_mbuf);
   5906 			rxs->rxs_mbuf = NULL;
   5907 		}
   5908 	}
   5909 }
   5910 
   5911 /*
 * Set up registers for RSS.
 *
 * XXX VMDq is not yet supported.
   5915  */
   5916 static void
   5917 wm_init_rss(struct wm_softc *sc)
   5918 {
   5919 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5920 	int i;
   5921 
   5922 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5923 
   5924 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5925 		unsigned int qid, reta_ent;
   5926 
   5927 		qid  = i % sc->sc_nqueues;
   5928 		switch (sc->sc_type) {
   5929 		case WM_T_82574:
   5930 			reta_ent = __SHIFTIN(qid,
   5931 			    RETA_ENT_QINDEX_MASK_82574);
   5932 			break;
   5933 		case WM_T_82575:
   5934 			reta_ent = __SHIFTIN(qid,
   5935 			    RETA_ENT_QINDEX1_MASK_82575);
   5936 			break;
   5937 		default:
   5938 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5939 			break;
   5940 		}
   5941 
   5942 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5943 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5944 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5945 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5946 	}
   5947 
   5948 	rss_getkey((uint8_t *)rss_key);
   5949 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5950 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5951 
   5952 	if (sc->sc_type == WM_T_82574)
   5953 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5954 	else
   5955 		mrqc = MRQC_ENABLE_RSS_MQ;
   5956 
   5957 	/*
	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5959 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5960 	 */
   5961 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5962 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5963 #if 0
   5964 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5965 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5966 #endif
   5967 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5968 
   5969 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5970 }
   5971 
   5972 /*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the following parameters:
 *     - The number of hardware queues
   5977  *     - The number of MSI-X vectors (= "nvectors" argument)
   5978  *     - ncpu
   5979  */
   5980 static void
   5981 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5982 {
   5983 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5984 
   5985 	if (nvectors < 2) {
   5986 		sc->sc_nqueues = 1;
   5987 		return;
   5988 	}
   5989 
   5990 	switch (sc->sc_type) {
   5991 	case WM_T_82572:
   5992 		hw_ntxqueues = 2;
   5993 		hw_nrxqueues = 2;
   5994 		break;
   5995 	case WM_T_82574:
   5996 		hw_ntxqueues = 2;
   5997 		hw_nrxqueues = 2;
   5998 		break;
   5999 	case WM_T_82575:
   6000 		hw_ntxqueues = 4;
   6001 		hw_nrxqueues = 4;
   6002 		break;
   6003 	case WM_T_82576:
   6004 		hw_ntxqueues = 16;
   6005 		hw_nrxqueues = 16;
   6006 		break;
   6007 	case WM_T_82580:
   6008 	case WM_T_I350:
   6009 	case WM_T_I354:
   6010 		hw_ntxqueues = 8;
   6011 		hw_nrxqueues = 8;
   6012 		break;
   6013 	case WM_T_I210:
   6014 		hw_ntxqueues = 4;
   6015 		hw_nrxqueues = 4;
   6016 		break;
   6017 	case WM_T_I211:
   6018 		hw_ntxqueues = 2;
   6019 		hw_nrxqueues = 2;
   6020 		break;
   6021 		/*
		 * The following Ethernet controllers do not support MSI-X;
   6023 		 * this driver doesn't let them use multiqueue.
   6024 		 *     - WM_T_80003
   6025 		 *     - WM_T_ICH8
   6026 		 *     - WM_T_ICH9
   6027 		 *     - WM_T_ICH10
   6028 		 *     - WM_T_PCH
   6029 		 *     - WM_T_PCH2
   6030 		 *     - WM_T_PCH_LPT
   6031 		 */
   6032 	default:
   6033 		hw_ntxqueues = 1;
   6034 		hw_nrxqueues = 1;
   6035 		break;
   6036 	}
   6037 
   6038 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6039 
   6040 	/*
	 * Since using more queues than MSI-X vectors cannot improve
	 * scaling, limit the number of queues actually used.
   6043 	 */
   6044 	if (nvectors < hw_nqueues + 1)
   6045 		sc->sc_nqueues = nvectors - 1;
   6046 	else
   6047 		sc->sc_nqueues = hw_nqueues;
   6048 
   6049 	/*
	 * Since using more queues than CPUs cannot improve scaling,
	 * limit the number of queues actually used.
   6052 	 */
   6053 	if (ncpu < sc->sc_nqueues)
   6054 		sc->sc_nqueues = ncpu;
   6055 }
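
/*
 * Example: an 82576 (16 hardware queues) attached with five MSI-X
 * vectors on an eight-CPU system gets sc_nqueues = min(16, 5 - 1, 8)
 * = 4, i.e. four Tx/Rx queue pairs plus the separate link vector.
 */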
   6056 
   6057 static inline bool
   6058 wm_is_using_msix(struct wm_softc *sc)
   6059 {
   6060 
   6061 	return (sc->sc_nintrs > 1);
   6062 }
   6063 
   6064 static inline bool
   6065 wm_is_using_multiqueue(struct wm_softc *sc)
   6066 {
   6067 
   6068 	return (sc->sc_nqueues > 1);
   6069 }
   6070 
   6071 static int
   6072 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6073 {
   6074 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6075 
   6076 	wmq->wmq_id = qidx;
   6077 	wmq->wmq_intr_idx = intr_idx;
   6078 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   6079 	    wm_handle_queue, wmq);
   6080 	if (wmq->wmq_si != NULL)
   6081 		return 0;
   6082 
   6083 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6084 	    wmq->wmq_id);
   6085 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6086 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6087 	return ENOMEM;
   6088 }
   6089 
   6090 /*
 * This function is used for both single-interrupt MSI and INTx.
   6092  */
   6093 static int
   6094 wm_setup_legacy(struct wm_softc *sc)
   6095 {
   6096 	pci_chipset_tag_t pc = sc->sc_pc;
   6097 	const char *intrstr = NULL;
   6098 	char intrbuf[PCI_INTRSTR_LEN];
   6099 	int error;
   6100 
   6101 	error = wm_alloc_txrx_queues(sc);
   6102 	if (error) {
   6103 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6104 		    error);
   6105 		return ENOMEM;
   6106 	}
   6107 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6108 	    sizeof(intrbuf));
   6109 #ifdef WM_MPSAFE
   6110 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6111 #endif
   6112 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6113 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6114 	if (sc->sc_ihs[0] == NULL) {
   6115 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6116 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6117 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6118 		return ENOMEM;
   6119 	}
   6120 
   6121 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6122 	sc->sc_nintrs = 1;
   6123 
   6124 	return wm_softint_establish_queue(sc, 0, 0);
   6125 }
   6126 
   6127 static int
   6128 wm_setup_msix(struct wm_softc *sc)
   6129 {
   6130 	void *vih;
   6131 	kcpuset_t *affinity;
   6132 	int qidx, error, intr_idx, txrx_established;
   6133 	pci_chipset_tag_t pc = sc->sc_pc;
   6134 	const char *intrstr = NULL;
   6135 	char intrbuf[PCI_INTRSTR_LEN];
   6136 	char intr_xname[INTRDEVNAMEBUF];
   6137 
   6138 	if (sc->sc_nqueues < ncpu) {
   6139 		/*
		 * To avoid other devices' interrupts, the affinity of Tx/Rx
		 * interrupts starts from CPU#1.
   6142 		 */
   6143 		sc->sc_affinity_offset = 1;
   6144 	} else {
   6145 		/*
		 * In this case, this device uses all CPUs, so for readability
		 * we make the affinity cpu_index match the MSI-X vector
		 * number.
   6148 		 */
   6149 		sc->sc_affinity_offset = 0;
   6150 	}
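
	/*
	 * For example, with sc_affinity_offset == 1 and four Tx/Rx queues
	 * on an eight-CPU system, the TXRX0..TXRX3 vectors are bound to
	 * CPU1..CPU4, while the LINK vector keeps its default affinity.
	 */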
   6151 
   6152 	error = wm_alloc_txrx_queues(sc);
   6153 	if (error) {
   6154 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6155 		    error);
   6156 		return ENOMEM;
   6157 	}
   6158 
   6159 	kcpuset_create(&affinity, false);
   6160 	intr_idx = 0;
   6161 
   6162 	/*
   6163 	 * TX and RX
   6164 	 */
   6165 	txrx_established = 0;
   6166 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6167 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6168 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6169 
   6170 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6171 		    sizeof(intrbuf));
   6172 #ifdef WM_MPSAFE
   6173 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6174 		    PCI_INTR_MPSAFE, true);
   6175 #endif
   6176 		memset(intr_xname, 0, sizeof(intr_xname));
   6177 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6178 		    device_xname(sc->sc_dev), qidx);
   6179 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6180 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6181 		if (vih == NULL) {
   6182 			aprint_error_dev(sc->sc_dev,
   6183 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6184 			    intrstr ? " at " : "",
   6185 			    intrstr ? intrstr : "");
   6186 
   6187 			goto fail;
   6188 		}
   6189 		kcpuset_zero(affinity);
   6190 		/* Round-robin affinity */
   6191 		kcpuset_set(affinity, affinity_to);
   6192 		error = interrupt_distribute(vih, affinity, NULL);
   6193 		if (error == 0) {
   6194 			aprint_normal_dev(sc->sc_dev,
   6195 			    "for TX and RX interrupting at %s affinity to %u\n",
   6196 			    intrstr, affinity_to);
   6197 		} else {
   6198 			aprint_normal_dev(sc->sc_dev,
   6199 			    "for TX and RX interrupting at %s\n", intrstr);
   6200 		}
   6201 		sc->sc_ihs[intr_idx] = vih;
   6202 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6203 			goto fail;
   6204 		txrx_established++;
   6205 		intr_idx++;
   6206 	}
   6207 
   6208 	/* LINK */
   6209 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6210 	    sizeof(intrbuf));
   6211 #ifdef WM_MPSAFE
   6212 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6213 #endif
   6214 	memset(intr_xname, 0, sizeof(intr_xname));
   6215 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6216 	    device_xname(sc->sc_dev));
   6217 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6218 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6219 	if (vih == NULL) {
   6220 		aprint_error_dev(sc->sc_dev,
   6221 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6222 		    intrstr ? " at " : "",
   6223 		    intrstr ? intrstr : "");
   6224 
   6225 		goto fail;
   6226 	}
	/* Keep the default affinity for the LINK interrupt. */
   6228 	aprint_normal_dev(sc->sc_dev,
   6229 	    "for LINK interrupting at %s\n", intrstr);
   6230 	sc->sc_ihs[intr_idx] = vih;
   6231 	sc->sc_link_intr_idx = intr_idx;
   6232 
   6233 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6234 	kcpuset_destroy(affinity);
   6235 	return 0;
   6236 
   6237  fail:
   6238 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6239 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6240 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6241 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6242 	}
   6243 
   6244 	kcpuset_destroy(affinity);
   6245 	return ENOMEM;
   6246 }
   6247 
   6248 static void
   6249 wm_unset_stopping_flags(struct wm_softc *sc)
   6250 {
   6251 	int i;
   6252 
   6253 	KASSERT(WM_CORE_LOCKED(sc));
   6254 
   6255 	/* Must unset stopping flags in ascending order. */
   6256 	for (i = 0; i < sc->sc_nqueues; i++) {
   6257 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6258 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6259 
   6260 		mutex_enter(txq->txq_lock);
   6261 		txq->txq_stopping = false;
   6262 		mutex_exit(txq->txq_lock);
   6263 
   6264 		mutex_enter(rxq->rxq_lock);
   6265 		rxq->rxq_stopping = false;
   6266 		mutex_exit(rxq->rxq_lock);
   6267 	}
   6268 
   6269 	sc->sc_core_stopping = false;
   6270 }
   6271 
   6272 static void
   6273 wm_set_stopping_flags(struct wm_softc *sc)
   6274 {
   6275 	int i;
   6276 
   6277 	KASSERT(WM_CORE_LOCKED(sc));
   6278 
   6279 	sc->sc_core_stopping = true;
   6280 
   6281 	/* Must set stopping flags in ascending order. */
   6282 	for (i = 0; i < sc->sc_nqueues; i++) {
   6283 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6284 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6285 
   6286 		mutex_enter(rxq->rxq_lock);
   6287 		rxq->rxq_stopping = true;
   6288 		mutex_exit(rxq->rxq_lock);
   6289 
   6290 		mutex_enter(txq->txq_lock);
   6291 		txq->txq_stopping = true;
   6292 		mutex_exit(txq->txq_lock);
   6293 	}
   6294 }
   6295 
   6296 /*
   6297  * Write interrupt interval value to ITR or EITR
   6298  */
   6299 static void
   6300 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6301 {
   6302 
   6303 	if (!wmq->wmq_set_itr)
   6304 		return;
   6305 
   6306 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6307 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6308 
   6309 		/*
		 * The 82575 doesn't have the CNT_INGR field, so overwrite
		 * the counter field in software.
   6312 		 */
   6313 		if (sc->sc_type == WM_T_82575)
   6314 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   6315 		else
   6316 			eitr |= EITR_CNT_INGR;
   6317 
   6318 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6319 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6320 		/*
		 * The 82574 has both ITR and EITR.  Set EITR when we use
		 * the multiqueue function with MSI-X.
   6323 		 */
   6324 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6325 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6326 	} else {
   6327 		KASSERT(wmq->wmq_id == 0);
   6328 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6329 	}
   6330 
   6331 	wmq->wmq_set_itr = false;
   6332 }
   6333 
   6334 /*
   6335  * TODO
 * The dynamic ITR calculation below is almost the same as Linux igb's;
 * however, it does not fit wm(4), so AIM remains disabled until we find
 * an appropriate ITR calculation.
   6339  */
   6340 /*
 * Calculate the interrupt interval value that wm_itrs_writereg() will
 * write to the register.  This function does not itself write the
 * ITR/EITR register.
   6343  */
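/*
 * For example (in the disabled code below), an average packet size of
 * 600 bytes becomes 624 with the 24-byte overhead, falls in the
 * mid-size boost range, and yields new_itr = 624 / 3 = 208, which is
 * then scaled by 4 to 832 on everything except the 82575.
 */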
   6344 static void
   6345 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6346 {
   6347 #ifdef NOTYET
   6348 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6349 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6350 	uint32_t avg_size = 0;
   6351 	uint32_t new_itr;
   6352 
   6353 	if (rxq->rxq_packets)
   6354 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6355 	if (txq->txq_packets)
   6356 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6357 
   6358 	if (avg_size == 0) {
   6359 		new_itr = 450; /* restore default value */
   6360 		goto out;
   6361 	}
   6362 
   6363 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6364 	avg_size += 24;
   6365 
   6366 	/* Don't starve jumbo frames */
   6367 	avg_size = uimin(avg_size, 3000);
   6368 
   6369 	/* Give a little boost to mid-size frames */
   6370 	if ((avg_size > 300) && (avg_size < 1200))
   6371 		new_itr = avg_size / 3;
   6372 	else
   6373 		new_itr = avg_size / 2;
   6374 
   6375 out:
   6376 	/*
	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   6378 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6379 	 */
   6380 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6381 		new_itr *= 4;
   6382 
   6383 	if (new_itr != wmq->wmq_itr) {
   6384 		wmq->wmq_itr = new_itr;
   6385 		wmq->wmq_set_itr = true;
   6386 	} else
   6387 		wmq->wmq_set_itr = false;
   6388 
   6389 	rxq->rxq_packets = 0;
   6390 	rxq->rxq_bytes = 0;
   6391 	txq->txq_packets = 0;
   6392 	txq->txq_bytes = 0;
   6393 #endif
   6394 }
   6395 
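/*
 * Attach the per-device sysctl tree.  The nodes appear under
 * hw.<ifname>; e.g. "sysctl -w hw.wm0.txrx_workqueue=1" makes the
 * driver process packets in a workqueue instead of a softint.
 */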
   6396 static void
   6397 wm_init_sysctls(struct wm_softc *sc)
   6398 {
   6399 	struct sysctllog **log;
   6400 	const struct sysctlnode *rnode, *qnode, *cnode;
   6401 	int i, rv;
   6402 	const char *dvname;
   6403 
   6404 	log = &sc->sc_sysctllog;
   6405 	dvname = device_xname(sc->sc_dev);
   6406 
   6407 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6408 	    0, CTLTYPE_NODE, dvname,
   6409 	    SYSCTL_DESCR("wm information and settings"),
   6410 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6411 	if (rv != 0)
   6412 		goto err;
   6413 
   6414 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6416 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6417 	if (rv != 0)
   6418 		goto teardown;
   6419 
   6420 	for (i = 0; i < sc->sc_nqueues; i++) {
   6421 		struct wm_queue *wmq = &sc->sc_queue[i];
   6422 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6423 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6424 
   6425 		snprintf(sc->sc_queue[i].sysctlname,
   6426 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6427 
   6428 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6429 		    0, CTLTYPE_NODE,
   6430 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6431 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6432 			break;
   6433 
   6434 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6435 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6436 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6437 		    NULL, 0, &txq->txq_free,
   6438 		    0, CTL_CREATE, CTL_EOL) != 0)
   6439 			break;
   6440 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6441 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6442 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6443 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6444 		    0, CTL_CREATE, CTL_EOL) != 0)
   6445 			break;
   6446 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6447 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6448 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6449 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6450 		    0, CTL_CREATE, CTL_EOL) != 0)
   6451 			break;
   6452 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6453 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6454 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6455 		    NULL, 0, &txq->txq_next,
   6456 		    0, CTL_CREATE, CTL_EOL) != 0)
   6457 			break;
   6458 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6459 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6460 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6461 		    NULL, 0, &txq->txq_sfree,
   6462 		    0, CTL_CREATE, CTL_EOL) != 0)
   6463 			break;
   6464 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6465 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6466 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6467 		    NULL, 0, &txq->txq_snext,
   6468 		    0, CTL_CREATE, CTL_EOL) != 0)
   6469 			break;
   6470 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6471 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6472 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6473 		    NULL, 0, &txq->txq_sdirty,
   6474 		    0, CTL_CREATE, CTL_EOL) != 0)
   6475 			break;
   6476 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6477 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6478 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6479 		    NULL, 0, &txq->txq_flags,
   6480 		    0, CTL_CREATE, CTL_EOL) != 0)
   6481 			break;
   6482 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6483 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6484 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6485 		    NULL, 0, &txq->txq_stopping,
   6486 		    0, CTL_CREATE, CTL_EOL) != 0)
   6487 			break;
   6488 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6489 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6490 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6491 		    NULL, 0, &txq->txq_sending,
   6492 		    0, CTL_CREATE, CTL_EOL) != 0)
   6493 			break;
   6494 
   6495 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6496 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6497 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6498 		    NULL, 0, &rxq->rxq_ptr,
   6499 		    0, CTL_CREATE, CTL_EOL) != 0)
   6500 			break;
   6501 	}
   6502 
   6503 #ifdef WM_DEBUG
   6504 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6505 	    CTLTYPE_INT, "debug_flags",
   6506 	    SYSCTL_DESCR(
   6507 		    "Debug flags:\n"	\
   6508 		    "\t0x01 LINK\n"	\
   6509 		    "\t0x02 TX\n"	\
   6510 		    "\t0x04 RX\n"	\
   6511 		    "\t0x08 GMII\n"	\
   6512 		    "\t0x10 MANAGE\n"	\
   6513 		    "\t0x20 NVM\n"	\
   6514 		    "\t0x40 INIT\n"	\
   6515 		    "\t0x80 LOCK"),
   6516 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6517 	if (rv != 0)
   6518 		goto teardown;
   6519 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6520 	    CTLTYPE_BOOL, "trigger_reset",
   6521 	    SYSCTL_DESCR("Trigger an interface reset"),
   6522 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6523 	if (rv != 0)
   6524 		goto teardown;
   6525 #endif
   6526 
   6527 	return;
   6528 
   6529 teardown:
   6530 	sysctl_teardown(log);
   6531 err:
   6532 	sc->sc_sysctllog = NULL;
   6533 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6534 	    __func__, rv);
   6535 }
   6536 
   6537 /*
   6538  * wm_init:		[ifnet interface function]
   6539  *
   6540  *	Initialize the interface.
   6541  */
   6542 static int
   6543 wm_init(struct ifnet *ifp)
   6544 {
   6545 	struct wm_softc *sc = ifp->if_softc;
   6546 	int ret;
   6547 
   6548 	KASSERT(IFNET_LOCKED(ifp));
   6549 
   6550 	if (sc->sc_dying)
   6551 		return ENXIO;
   6552 
   6553 	WM_CORE_LOCK(sc);
   6554 	ret = wm_init_locked(ifp);
   6555 	WM_CORE_UNLOCK(sc);
   6556 
   6557 	return ret;
   6558 }
   6559 
   6560 static int
   6561 wm_init_locked(struct ifnet *ifp)
   6562 {
   6563 	struct wm_softc *sc = ifp->if_softc;
   6564 	struct ethercom *ec = &sc->sc_ethercom;
   6565 	int i, j, trynum, error = 0;
   6566 	uint32_t reg, sfp_mask = 0;
   6567 
   6568 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6569 		device_xname(sc->sc_dev), __func__));
   6570 	KASSERT(IFNET_LOCKED(ifp));
   6571 	KASSERT(WM_CORE_LOCKED(sc));
   6572 
   6573 	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
   6577 	 * on such platforms.  One possibility is that the DMA itself is
   6578 	 * slightly more efficient if the front of the entire packet (instead
   6579 	 * of the front of the headers) is aligned.
   6580 	 *
   6581 	 * Note we must always set align_tweak to 0 if we are using
   6582 	 * jumbo frames.
   6583 	 */
   6584 #ifdef __NO_STRICT_ALIGNMENT
   6585 	sc->sc_align_tweak = 0;
   6586 #else
   6587 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6588 		sc->sc_align_tweak = 0;
   6589 	else
   6590 		sc->sc_align_tweak = 2;
   6591 #endif /* __NO_STRICT_ALIGNMENT */
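
	/*
	 * With an align_tweak of 2, the 14-byte Ethernet header ends on
	 * a 4-byte boundary, so the IP header that follows it is aligned.
	 */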
   6592 
   6593 	/* Cancel any pending I/O. */
   6594 	wm_stop_locked(ifp, false, false);
   6595 
   6596 	/* Update statistics before reset */
   6597 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6598 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6599 
   6600 	/* >= PCH_SPT hardware workaround before reset. */
   6601 	if (sc->sc_type >= WM_T_PCH_SPT)
   6602 		wm_flush_desc_rings(sc);
   6603 
   6604 	/* Reset the chip to a known state. */
   6605 	wm_reset(sc);
   6606 
   6607 	/*
	 * AMT-based hardware can now take control from firmware.
	 * Do this after reset.
   6610 	 */
   6611 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6612 		wm_get_hw_control(sc);
   6613 
   6614 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6615 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6616 		wm_legacy_irq_quirk_spt(sc);
   6617 
   6618 	/* Init hardware bits */
   6619 	wm_initialize_hardware_bits(sc);
   6620 
   6621 	/* Reset the PHY. */
   6622 	if (sc->sc_flags & WM_F_HAS_MII)
   6623 		wm_gmii_reset(sc);
   6624 
   6625 	if (sc->sc_type >= WM_T_ICH8) {
   6626 		reg = CSR_READ(sc, WMREG_GCR);
   6627 		/*
   6628 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6629 		 * default after reset.
   6630 		 */
   6631 		if (sc->sc_type == WM_T_ICH8)
   6632 			reg |= GCR_NO_SNOOP_ALL;
   6633 		else
   6634 			reg &= ~GCR_NO_SNOOP_ALL;
   6635 		CSR_WRITE(sc, WMREG_GCR, reg);
   6636 	}
   6637 
   6638 	if ((sc->sc_type >= WM_T_ICH8)
   6639 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6640 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6641 
   6642 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6643 		reg |= CTRL_EXT_RO_DIS;
   6644 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6645 	}
   6646 
   6647 	/* Calculate (E)ITR value */
   6648 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6649 		/*
		 * For NEWQUEUE's EITR (except for the 82575).
		 * The 82575's EITR should be set to the same throttling value
		 * as other old controllers' ITR because the interrupt/sec
		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
		 *
		 * The 82574's EITR should be set to the same throttling value
		 * as ITR.
   6656 		 *
   6657 		 * For N interrupts/sec, set this value to:
   6658 		 * 1,000,000 / N in contrast to ITR throttling value.
   6659 		 */
   6660 		sc->sc_itr_init = 450;
   6661 	} else if (sc->sc_type >= WM_T_82543) {
   6662 		/*
   6663 		 * Set up the interrupt throttling register (units of 256ns)
   6664 		 * Note that a footnote in Intel's documentation says this
   6665 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6666 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6667 		 * that that is also true for the 1024ns units of the other
   6668 		 * interrupt-related timer registers -- so, really, we ought
   6669 		 * to divide this value by 4 when the link speed is low.
   6670 		 *
   6671 		 * XXX implement this division at link speed change!
   6672 		 */
   6673 
   6674 		/*
   6675 		 * For N interrupts/sec, set this value to:
   6676 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6677 		 * absolute and packet timer values to this value
   6678 		 * divided by 4 to get "simple timer" behavior.
   6679 		 */
   6680 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6681 	}
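
	/*
	 * With these defaults, the legacy ITR value of 1500 (256ns units)
	 * gives 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, and
	 * the NEWQUEUE EITR value of 450 gives 1,000,000 / 450 ~= 2222
	 * interrupts/sec.
	 */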
   6682 
   6683 	error = wm_init_txrx_queues(sc);
   6684 	if (error)
   6685 		goto out;
   6686 
   6687 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6688 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6689 	    (sc->sc_type >= WM_T_82575))
   6690 		wm_serdes_power_up_link_82575(sc);
   6691 
   6692 	/* Clear out the VLAN table -- we don't use it (yet). */
   6693 	CSR_WRITE(sc, WMREG_VET, 0);
   6694 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6695 		trynum = 10; /* Due to hw errata */
   6696 	else
   6697 		trynum = 1;
   6698 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6699 		for (j = 0; j < trynum; j++)
   6700 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6701 
   6702 	/*
   6703 	 * Set up flow-control parameters.
   6704 	 *
   6705 	 * XXX Values could probably stand some tuning.
   6706 	 */
   6707 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6708 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6709 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6710 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6711 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6712 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6713 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6714 	}
   6715 
   6716 	sc->sc_fcrtl = FCRTL_DFLT;
   6717 	if (sc->sc_type < WM_T_82543) {
   6718 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6719 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6720 	} else {
   6721 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6722 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6723 	}
   6724 
   6725 	if (sc->sc_type == WM_T_80003)
   6726 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6727 	else
   6728 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6729 
   6730 	/* Writes the control register. */
   6731 	wm_set_vlan(sc);
   6732 
   6733 	if (sc->sc_flags & WM_F_HAS_MII) {
   6734 		uint16_t kmreg;
   6735 
   6736 		switch (sc->sc_type) {
   6737 		case WM_T_80003:
   6738 		case WM_T_ICH8:
   6739 		case WM_T_ICH9:
   6740 		case WM_T_ICH10:
   6741 		case WM_T_PCH:
   6742 		case WM_T_PCH2:
   6743 		case WM_T_PCH_LPT:
   6744 		case WM_T_PCH_SPT:
   6745 		case WM_T_PCH_CNP:
   6746 			/*
   6747 			 * Set the mac to wait the maximum time between each
   6748 			 * iteration and increase the max iterations when
   6749 			 * polling the phy; this fixes erroneous timeouts at
   6750 			 * 10Mbps.
   6751 			 */
   6752 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6753 			    0xFFFF);
   6754 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6755 			    &kmreg);
   6756 			kmreg |= 0x3F;
   6757 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6758 			    kmreg);
   6759 			break;
   6760 		default:
   6761 			break;
   6762 		}
   6763 
   6764 		if (sc->sc_type == WM_T_80003) {
   6765 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6766 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6767 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6768 
   6769 			/* Bypass RX and TX FIFOs */
   6770 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6771 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6772 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6773 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6774 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6775 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6776 		}
   6777 	}
   6778 #if 0
   6779 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6780 #endif
   6781 
   6782 	/* Set up checksum offload parameters. */
   6783 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6784 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6785 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6786 		reg |= RXCSUM_IPOFL;
   6787 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6788 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6789 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6790 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6791 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6792 
   6793 	/* Set registers about MSI-X */
   6794 	if (wm_is_using_msix(sc)) {
   6795 		uint32_t ivar, qintr_idx;
   6796 		struct wm_queue *wmq;
   6797 		unsigned int qid;
   6798 
   6799 		if (sc->sc_type == WM_T_82575) {
   6800 			/* Interrupt control */
   6801 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6802 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6803 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6804 
   6805 			/* TX and RX */
   6806 			for (i = 0; i < sc->sc_nqueues; i++) {
   6807 				wmq = &sc->sc_queue[i];
   6808 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6809 				    EITR_TX_QUEUE(wmq->wmq_id)
   6810 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6811 			}
   6812 			/* Link status */
   6813 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6814 			    EITR_OTHER);
   6815 		} else if (sc->sc_type == WM_T_82574) {
   6816 			/* Interrupt control */
   6817 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6818 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6819 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6820 
   6821 			/*
   6822 			 * Work around issue with spurious interrupts
   6823 			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has not
			 * been initialized yet, so re-initialize WMREG_RFCTL
			 * here.
   6826 			 */
   6827 			reg = CSR_READ(sc, WMREG_RFCTL);
   6828 			reg |= WMREG_RFCTL_ACKDIS;
   6829 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6830 
   6831 			ivar = 0;
   6832 			/* TX and RX */
   6833 			for (i = 0; i < sc->sc_nqueues; i++) {
   6834 				wmq = &sc->sc_queue[i];
   6835 				qid = wmq->wmq_id;
   6836 				qintr_idx = wmq->wmq_intr_idx;
   6837 
   6838 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6839 				    IVAR_TX_MASK_Q_82574(qid));
   6840 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6841 				    IVAR_RX_MASK_Q_82574(qid));
   6842 			}
   6843 			/* Link status */
   6844 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6845 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6846 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6847 		} else {
   6848 			/* Interrupt control */
   6849 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6850 			    | GPIE_EIAME | GPIE_PBA);
   6851 
   6852 			switch (sc->sc_type) {
   6853 			case WM_T_82580:
   6854 			case WM_T_I350:
   6855 			case WM_T_I354:
   6856 			case WM_T_I210:
   6857 			case WM_T_I211:
   6858 				/* TX and RX */
   6859 				for (i = 0; i < sc->sc_nqueues; i++) {
   6860 					wmq = &sc->sc_queue[i];
   6861 					qid = wmq->wmq_id;
   6862 					qintr_idx = wmq->wmq_intr_idx;
   6863 
   6864 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6865 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6866 					ivar |= __SHIFTIN((qintr_idx
   6867 						| IVAR_VALID),
   6868 					    IVAR_TX_MASK_Q(qid));
   6869 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6870 					ivar |= __SHIFTIN((qintr_idx
   6871 						| IVAR_VALID),
   6872 					    IVAR_RX_MASK_Q(qid));
   6873 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6874 				}
   6875 				break;
   6876 			case WM_T_82576:
   6877 				/* TX and RX */
   6878 				for (i = 0; i < sc->sc_nqueues; i++) {
   6879 					wmq = &sc->sc_queue[i];
   6880 					qid = wmq->wmq_id;
   6881 					qintr_idx = wmq->wmq_intr_idx;
   6882 
   6883 					ivar = CSR_READ(sc,
   6884 					    WMREG_IVAR_Q_82576(qid));
   6885 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6886 					ivar |= __SHIFTIN((qintr_idx
   6887 						| IVAR_VALID),
   6888 					    IVAR_TX_MASK_Q_82576(qid));
   6889 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6890 					ivar |= __SHIFTIN((qintr_idx
   6891 						| IVAR_VALID),
   6892 					    IVAR_RX_MASK_Q_82576(qid));
   6893 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6894 					    ivar);
   6895 				}
   6896 				break;
   6897 			default:
   6898 				break;
   6899 			}
   6900 
   6901 			/* Link status */
   6902 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6903 			    IVAR_MISC_OTHER);
   6904 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6905 		}
   6906 
   6907 		if (wm_is_using_multiqueue(sc)) {
   6908 			wm_init_rss(sc);
   6909 
			/*
			 * NOTE: Receive Full-Packet Checksum Offload is
			 * mutually exclusive with Multiqueue.  However, this
			 * is not the same as the TCP/IP checksums, which
			 * still work.
			 */
   6916 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6917 			reg |= RXCSUM_PCSD;
   6918 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6919 		}
   6920 	}
   6921 
   6922 	/* Set up the interrupt registers. */
   6923 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6924 
   6925 	/* Enable SFP module insertion interrupt if it's required */
   6926 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6927 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6928 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6929 		sfp_mask = ICR_GPI(0);
   6930 	}
   6931 
   6932 	if (wm_is_using_msix(sc)) {
   6933 		uint32_t mask;
   6934 		struct wm_queue *wmq;
   6935 
   6936 		switch (sc->sc_type) {
   6937 		case WM_T_82574:
   6938 			mask = 0;
   6939 			for (i = 0; i < sc->sc_nqueues; i++) {
   6940 				wmq = &sc->sc_queue[i];
   6941 				mask |= ICR_TXQ(wmq->wmq_id);
   6942 				mask |= ICR_RXQ(wmq->wmq_id);
   6943 			}
   6944 			mask |= ICR_OTHER;
   6945 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6946 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6947 			break;
   6948 		default:
   6949 			if (sc->sc_type == WM_T_82575) {
   6950 				mask = 0;
   6951 				for (i = 0; i < sc->sc_nqueues; i++) {
   6952 					wmq = &sc->sc_queue[i];
   6953 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6954 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6955 				}
   6956 				mask |= EITR_OTHER;
   6957 			} else {
   6958 				mask = 0;
   6959 				for (i = 0; i < sc->sc_nqueues; i++) {
   6960 					wmq = &sc->sc_queue[i];
   6961 					mask |= 1 << wmq->wmq_intr_idx;
   6962 				}
   6963 				mask |= 1 << sc->sc_link_intr_idx;
   6964 			}
   6965 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6966 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6967 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6968 
   6969 			/* For other interrupts */
   6970 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6971 			break;
   6972 		}
   6973 	} else {
   6974 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6975 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6976 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6977 	}
   6978 
   6979 	/* Set up the inter-packet gap. */
   6980 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6981 
   6982 	if (sc->sc_type >= WM_T_82543) {
   6983 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6984 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6985 			wm_itrs_writereg(sc, wmq);
   6986 		}
   6987 		/*
		 * Link interrupts occur much less often than TX and RX
		 * interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
		 * if_igb does.
   6992 		 */
   6993 	}
   6994 
   6995 	/* Set the VLAN EtherType. */
   6996 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6997 
   6998 	/*
   6999 	 * Set up the transmit control register; we start out with
   7000 	 * a collision distance suitable for FDX, but update it when
   7001 	 * we resolve the media type.
   7002 	 */
   7003 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   7004 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   7005 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7006 	if (sc->sc_type >= WM_T_82571)
   7007 		sc->sc_tctl |= TCTL_MULR;
   7008 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7009 
   7010 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set.  See the documentation. */
   7012 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7013 	}
   7014 
   7015 	if (sc->sc_type == WM_T_80003) {
   7016 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7017 		reg &= ~TCTL_EXT_GCEX_MASK;
   7018 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7019 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7020 	}
   7021 
   7022 	/* Set the media. */
   7023 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7024 		goto out;
   7025 
   7026 	/* Configure for OS presence */
   7027 	wm_init_manageability(sc);
   7028 
   7029 	/*
   7030 	 * Set up the receive control register; we actually program the
   7031 	 * register when we set the receive filter. Use multicast address
   7032 	 * offset type 0.
   7033 	 *
   7034 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7035 	 * don't enable that feature.
   7036 	 */
   7037 	sc->sc_mchash_type = 0;
   7038 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7039 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7040 
	/* The 82574 uses the one-buffer extended Rx descriptor. */
   7042 	if (sc->sc_type == WM_T_82574)
   7043 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7044 
   7045 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7046 		sc->sc_rctl |= RCTL_SECRC;
   7047 
   7048 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7049 	    && (ifp->if_mtu > ETHERMTU)) {
   7050 		sc->sc_rctl |= RCTL_LPE;
   7051 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7052 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7053 	}
   7054 
   7055 	if (MCLBYTES == 2048)
   7056 		sc->sc_rctl |= RCTL_2k;
   7057 	else {
   7058 		if (sc->sc_type >= WM_T_82543) {
   7059 			switch (MCLBYTES) {
   7060 			case 4096:
   7061 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7062 				break;
   7063 			case 8192:
   7064 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7065 				break;
   7066 			case 16384:
   7067 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7068 				break;
   7069 			default:
   7070 				panic("wm_init: MCLBYTES %d unsupported",
   7071 				    MCLBYTES);
   7072 				break;
   7073 			}
   7074 		} else
   7075 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7076 	}
   7077 
   7078 	/* Enable ECC */
   7079 	switch (sc->sc_type) {
   7080 	case WM_T_82571:
   7081 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7082 		reg |= PBA_ECC_CORR_EN;
   7083 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7084 		break;
   7085 	case WM_T_PCH_LPT:
   7086 	case WM_T_PCH_SPT:
   7087 	case WM_T_PCH_CNP:
   7088 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7089 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7090 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7091 
   7092 		sc->sc_ctrl |= CTRL_MEHE;
   7093 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7094 		break;
   7095 	default:
   7096 		break;
   7097 	}
   7098 
   7099 	/*
   7100 	 * Set the receive filter.
   7101 	 *
   7102 	 * For 82575 and 82576, the RX descriptors must be initialized after
	 * the setting of RCTL.EN in wm_set_filter().
   7104 	 */
   7105 	wm_set_filter(sc);
   7106 
	/* On 82575 and later, set RDT only if RX is enabled. */
   7108 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7109 		int qidx;
   7110 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7111 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7112 			for (i = 0; i < WM_NRXDESC; i++) {
   7113 				mutex_enter(rxq->rxq_lock);
   7114 				wm_init_rxdesc(rxq, i);
   7115 				mutex_exit(rxq->rxq_lock);
   7117 			}
   7118 		}
   7119 	}
   7120 
   7121 	wm_unset_stopping_flags(sc);
   7122 
   7123 	/* Start the one second link check clock. */
   7124 	callout_schedule(&sc->sc_tick_ch, hz);
   7125 
   7126 	/*
   7127 	 * ...all done! (IFNET_LOCKED asserted above.)
   7128 	 */
   7129 	ifp->if_flags |= IFF_RUNNING;
   7130 
   7131  out:
   7132 	/* Save last flags for the callback */
   7133 	sc->sc_if_flags = ifp->if_flags;
   7134 	sc->sc_ec_capenable = ec->ec_capenable;
   7135 	if (error)
   7136 		log(LOG_ERR, "%s: interface not running\n",
   7137 		    device_xname(sc->sc_dev));
   7138 	return error;
   7139 }
   7140 
   7141 /*
   7142  * wm_stop:		[ifnet interface function]
   7143  *
   7144  *	Stop transmission on the interface.
   7145  */
   7146 static void
   7147 wm_stop(struct ifnet *ifp, int disable)
   7148 {
   7149 	struct wm_softc *sc = ifp->if_softc;
   7150 
   7151 	ASSERT_SLEEPABLE();
   7152 	KASSERT(IFNET_LOCKED(ifp));
   7153 
   7154 	WM_CORE_LOCK(sc);
   7155 	wm_stop_locked(ifp, disable ? true : false, true);
   7156 	WM_CORE_UNLOCK(sc);
   7157 
   7158 	/*
   7159 	 * After wm_set_stopping_flags(), it is guaranteed that
   7160 	 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in wm_stop_locked()
	 * because it can sleep, so call workqueue_wait() here.
   7164 	 */
   7165 	for (int i = 0; i < sc->sc_nqueues; i++)
   7166 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7167 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7168 }
   7169 
   7170 static void
   7171 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7172 {
   7173 	struct wm_softc *sc = ifp->if_softc;
   7174 	struct wm_txsoft *txs;
   7175 	int i, qidx;
   7176 
   7177 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7178 		device_xname(sc->sc_dev), __func__));
   7179 	KASSERT(IFNET_LOCKED(ifp));
   7180 	KASSERT(WM_CORE_LOCKED(sc));
   7181 
   7182 	wm_set_stopping_flags(sc);
   7183 
   7184 	if (sc->sc_flags & WM_F_HAS_MII) {
   7185 		/* Down the MII. */
   7186 		mii_down(&sc->sc_mii);
   7187 	} else {
   7188 #if 0
   7189 		/* Should we clear PHY's status properly? */
   7190 		wm_reset(sc);
   7191 #endif
   7192 	}
   7193 
   7194 	/* Stop the transmit and receive processes. */
   7195 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7196 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7197 	sc->sc_rctl &= ~RCTL_EN;
   7198 
   7199 	/*
   7200 	 * Clear the interrupt mask to ensure the device cannot assert its
   7201 	 * interrupt line.
   7202 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7203 	 * service any currently pending or shared interrupt.
   7204 	 */
   7205 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7206 	sc->sc_icr = 0;
   7207 	if (wm_is_using_msix(sc)) {
   7208 		if (sc->sc_type != WM_T_82574) {
   7209 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7210 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7211 		} else
   7212 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7213 	}
   7214 
   7215 	/*
   7216 	 * Stop callouts after interrupts are disabled; if we have
   7217 	 * to wait for them, we will be releasing the CORE_LOCK
   7218 	 * briefly, which will unblock interrupts on the current CPU.
   7219 	 */
   7220 
   7221 	/* Stop the one second clock. */
   7222 	if (wait)
   7223 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7224 	else
   7225 		callout_stop(&sc->sc_tick_ch);
   7226 
   7227 	/* Stop the 82547 Tx FIFO stall check timer. */
   7228 	if (sc->sc_type == WM_T_82547) {
   7229 		if (wait)
   7230 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7231 		else
   7232 			callout_stop(&sc->sc_txfifo_ch);
   7233 	}
   7234 
   7235 	/* Release any queued transmit buffers. */
   7236 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7237 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7238 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7239 		struct mbuf *m;
   7240 
   7241 		mutex_enter(txq->txq_lock);
   7242 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7243 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7244 			txs = &txq->txq_soft[i];
   7245 			if (txs->txs_mbuf != NULL) {
   7246 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   7247 				m_freem(txs->txs_mbuf);
   7248 				txs->txs_mbuf = NULL;
   7249 			}
   7250 		}
   7251 		/* Drain txq_interq */
   7252 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7253 			m_freem(m);
   7254 		mutex_exit(txq->txq_lock);
   7255 	}
   7256 
   7257 	/* Mark the interface as down and cancel the watchdog timer. */
   7258 	ifp->if_flags &= ~IFF_RUNNING;
   7259 	sc->sc_if_flags = ifp->if_flags;
   7260 
   7261 	if (disable) {
   7262 		for (i = 0; i < sc->sc_nqueues; i++) {
   7263 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7264 			mutex_enter(rxq->rxq_lock);
   7265 			wm_rxdrain(rxq);
   7266 			mutex_exit(rxq->rxq_lock);
   7267 		}
   7268 	}
   7269 
   7270 #if 0 /* notyet */
   7271 	if (sc->sc_type >= WM_T_82544)
   7272 		CSR_WRITE(sc, WMREG_WUC, 0);
   7273 #endif
   7274 }
   7275 
   7276 static void
   7277 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7278 {
   7279 	struct mbuf *m;
   7280 	int i;
   7281 
   7282 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7283 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7284 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7285 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7286 		    m->m_data, m->m_len, m->m_flags);
   7287 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7288 	    i, i == 1 ? "" : "s");
   7289 }
   7290 
   7291 /*
   7292  * wm_82547_txfifo_stall:
   7293  *
   7294  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7295  *	reset the FIFO pointers, and restart packet transmission.
   7296  */
   7297 static void
   7298 wm_82547_txfifo_stall(void *arg)
   7299 {
   7300 	struct wm_softc *sc = arg;
   7301 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7302 
   7303 	mutex_enter(txq->txq_lock);
   7304 
   7305 	if (txq->txq_stopping)
   7306 		goto out;
   7307 
   7308 	if (txq->txq_fifo_stall) {
   7309 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7310 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7311 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7312 			/*
   7313 			 * Packets have drained.  Stop transmitter, reset
   7314 			 * FIFO pointers, restart transmitter, and kick
   7315 			 * the packet queue.
   7316 			 */
   7317 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7318 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7319 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7320 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7321 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7322 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7323 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7324 			CSR_WRITE_FLUSH(sc);
   7325 
   7326 			txq->txq_fifo_head = 0;
   7327 			txq->txq_fifo_stall = 0;
   7328 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7329 		} else {
   7330 			/*
   7331 			 * Still waiting for packets to drain; try again in
   7332 			 * another tick.
   7333 			 */
   7334 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7335 		}
   7336 	}
   7337 
   7338 out:
   7339 	mutex_exit(txq->txq_lock);
   7340 }
   7341 
   7342 /*
   7343  * wm_82547_txfifo_bugchk:
   7344  *
   7345  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7346  *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7348  *
   7349  *	We do this by checking the amount of space before the end
   7350  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7351  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7352  *	the internal FIFO pointers to the beginning, and restart
   7353  *	transmission on the interface.
   7354  */
   7355 #define	WM_FIFO_HDR		0x10
   7356 #define	WM_82547_PAD_LEN	0x3e0
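/*
 * For example, a 1514-byte packet rounds up to 1536 bytes including the
 * FIFO header; it stalls the queue whenever 544 (1536 - 0x3e0) bytes or
 * fewer remain before the end of the FIFO.
 */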
   7357 static int
   7358 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7359 {
   7360 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7361 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7362 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7363 
   7364 	/* Just return if already stalled. */
   7365 	if (txq->txq_fifo_stall)
   7366 		return 1;
   7367 
   7368 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7369 		/* Stall only occurs in half-duplex mode. */
   7370 		goto send_packet;
   7371 	}
   7372 
   7373 	if (len >= WM_82547_PAD_LEN + space) {
   7374 		txq->txq_fifo_stall = 1;
   7375 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7376 		return 1;
   7377 	}
   7378 
   7379  send_packet:
   7380 	txq->txq_fifo_head += len;
   7381 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7382 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7383 
   7384 	return 0;
   7385 }
   7386 
   7387 static int
   7388 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7389 {
   7390 	int error;
   7391 
   7392 	/*
   7393 	 * Allocate the control data structures, and create and load the
   7394 	 * DMA map for it.
   7395 	 *
   7396 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7397 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7398 	 * both sets within the same 4G segment.
   7399 	 */
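	/*
	 * The 4G constraint is enforced by the 0x100000000ULL boundary
	 * argument passed to bus_dmamem_alloc() below.
	 */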
   7400 	if (sc->sc_type < WM_T_82544)
   7401 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7402 	else
   7403 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7404 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7405 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7406 	else
   7407 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7408 
   7409 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7410 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7411 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7412 		aprint_error_dev(sc->sc_dev,
   7413 		    "unable to allocate TX control data, error = %d\n",
   7414 		    error);
   7415 		goto fail_0;
   7416 	}
   7417 
   7418 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7419 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7420 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7421 		aprint_error_dev(sc->sc_dev,
   7422 		    "unable to map TX control data, error = %d\n", error);
   7423 		goto fail_1;
   7424 	}
   7425 
   7426 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7427 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7428 		aprint_error_dev(sc->sc_dev,
   7429 		    "unable to create TX control data DMA map, error = %d\n",
   7430 		    error);
   7431 		goto fail_2;
   7432 	}
   7433 
   7434 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7435 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7436 		aprint_error_dev(sc->sc_dev,
   7437 		    "unable to load TX control data DMA map, error = %d\n",
   7438 		    error);
   7439 		goto fail_3;
   7440 	}
   7441 
   7442 	return 0;
   7443 
   7444  fail_3:
   7445 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7446  fail_2:
   7447 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7448 	    WM_TXDESCS_SIZE(txq));
   7449  fail_1:
   7450 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7451  fail_0:
   7452 	return error;
   7453 }
   7454 
   7455 static void
   7456 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7457 {
   7458 
   7459 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7460 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7461 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7462 	    WM_TXDESCS_SIZE(txq));
   7463 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7464 }
   7465 
   7466 static int
   7467 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7468 {
   7469 	int error;
   7470 	size_t rxq_descs_size;
   7471 
   7472 	/*
   7473 	 * Allocate the control data structures, and create and load the
   7474 	 * DMA map for it.
   7475 	 *
   7476 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7477 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7478 	 * both sets within the same 4G segment.
   7479 	 */
   7480 	rxq->rxq_ndesc = WM_NRXDESC;
   7481 	if (sc->sc_type == WM_T_82574)
   7482 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7483 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7484 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7485 	else
   7486 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7487 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7488 
   7489 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7490 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7491 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7492 		aprint_error_dev(sc->sc_dev,
   7493 		    "unable to allocate RX control data, error = %d\n",
   7494 		    error);
   7495 		goto fail_0;
   7496 	}
   7497 
   7498 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7499 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7500 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7501 		aprint_error_dev(sc->sc_dev,
   7502 		    "unable to map RX control data, error = %d\n", error);
   7503 		goto fail_1;
   7504 	}
   7505 
   7506 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7507 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7508 		aprint_error_dev(sc->sc_dev,
   7509 		    "unable to create RX control data DMA map, error = %d\n",
   7510 		    error);
   7511 		goto fail_2;
   7512 	}
   7513 
   7514 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7515 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7516 		aprint_error_dev(sc->sc_dev,
   7517 		    "unable to load RX control data DMA map, error = %d\n",
   7518 		    error);
   7519 		goto fail_3;
   7520 	}
   7521 
   7522 	return 0;
   7523 
   7524  fail_3:
   7525 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7526  fail_2:
   7527 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7528 	    rxq_descs_size);
   7529  fail_1:
   7530 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7531  fail_0:
   7532 	return error;
   7533 }
   7534 
   7535 static void
   7536 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7537 {
   7538 
   7539 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7540 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7541 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7542 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7543 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7544 }
   7545 
   7546 
   7547 static int
   7548 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7549 {
   7550 	int i, error;
   7551 
   7552 	/* Create the transmit buffer DMA maps. */
   7553 	WM_TXQUEUELEN(txq) =
   7554 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7555 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7556 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7557 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7558 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7559 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7560 			aprint_error_dev(sc->sc_dev,
   7561 			    "unable to create Tx DMA map %d, error = %d\n",
   7562 			    i, error);
   7563 			goto fail;
   7564 		}
   7565 	}
   7566 
   7567 	return 0;
   7568 
   7569  fail:
   7570 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7571 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7572 			bus_dmamap_destroy(sc->sc_dmat,
   7573 			    txq->txq_soft[i].txs_dmamap);
   7574 	}
   7575 	return error;
   7576 }
   7577 
   7578 static void
   7579 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7580 {
   7581 	int i;
   7582 
   7583 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7584 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7585 			bus_dmamap_destroy(sc->sc_dmat,
   7586 			    txq->txq_soft[i].txs_dmamap);
   7587 	}
   7588 }
   7589 
   7590 static int
   7591 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7592 {
   7593 	int i, error;
   7594 
   7595 	/* Create the receive buffer DMA maps. */
   7596 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7597 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7598 			    MCLBYTES, 0, 0,
   7599 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7600 			aprint_error_dev(sc->sc_dev,
    7601 			    "unable to create Rx DMA map %d, error = %d\n",
   7602 			    i, error);
   7603 			goto fail;
   7604 		}
   7605 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7606 	}
   7607 
   7608 	return 0;
   7609 
   7610  fail:
   7611 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7612 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7613 			bus_dmamap_destroy(sc->sc_dmat,
   7614 			    rxq->rxq_soft[i].rxs_dmamap);
   7615 	}
   7616 	return error;
   7617 }
   7618 
   7619 static void
   7620 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7621 {
   7622 	int i;
   7623 
   7624 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7625 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7626 			bus_dmamap_destroy(sc->sc_dmat,
   7627 			    rxq->rxq_soft[i].rxs_dmamap);
   7628 	}
   7629 }
   7630 
   7631 /*
    7632  * wm_alloc_txrx_queues:
   7633  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7634  */
   7635 static int
   7636 wm_alloc_txrx_queues(struct wm_softc *sc)
   7637 {
   7638 	int i, error, tx_done, rx_done;
   7639 
   7640 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7641 	    KM_SLEEP);
   7642 	if (sc->sc_queue == NULL) {
    7643 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7644 		error = ENOMEM;
   7645 		goto fail_0;
   7646 	}
   7647 
   7648 	/* For transmission */
   7649 	error = 0;
   7650 	tx_done = 0;
   7651 	for (i = 0; i < sc->sc_nqueues; i++) {
   7652 #ifdef WM_EVENT_COUNTERS
   7653 		int j;
   7654 		const char *xname;
   7655 #endif
   7656 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7657 		txq->txq_sc = sc;
   7658 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7659 
   7660 		error = wm_alloc_tx_descs(sc, txq);
   7661 		if (error)
   7662 			break;
   7663 		error = wm_alloc_tx_buffer(sc, txq);
   7664 		if (error) {
   7665 			wm_free_tx_descs(sc, txq);
   7666 			break;
   7667 		}
   7668 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7669 		if (txq->txq_interq == NULL) {
   7670 			wm_free_tx_descs(sc, txq);
   7671 			wm_free_tx_buffer(sc, txq);
   7672 			error = ENOMEM;
   7673 			break;
   7674 		}
   7675 
   7676 #ifdef WM_EVENT_COUNTERS
   7677 		xname = device_xname(sc->sc_dev);
   7678 
   7679 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7680 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7681 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7682 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7683 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7684 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7685 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7686 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7687 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7688 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7689 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7690 
   7691 		for (j = 0; j < WM_NTXSEGS; j++) {
   7692 			snprintf(txq->txq_txseg_evcnt_names[j],
   7693 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7694 			    "txq%02dtxseg%d", i, j);
   7695 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7696 			    EVCNT_TYPE_MISC,
   7697 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7698 		}
   7699 
   7700 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7701 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7702 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7703 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7704 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7705 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7706 #endif /* WM_EVENT_COUNTERS */
   7707 
   7708 		tx_done++;
   7709 	}
   7710 	if (error)
   7711 		goto fail_1;
   7712 
   7713 	/* For receive */
   7714 	error = 0;
   7715 	rx_done = 0;
   7716 	for (i = 0; i < sc->sc_nqueues; i++) {
   7717 #ifdef WM_EVENT_COUNTERS
   7718 		const char *xname;
   7719 #endif
   7720 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7721 		rxq->rxq_sc = sc;
   7722 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7723 
   7724 		error = wm_alloc_rx_descs(sc, rxq);
   7725 		if (error)
   7726 			break;
   7727 
   7728 		error = wm_alloc_rx_buffer(sc, rxq);
   7729 		if (error) {
   7730 			wm_free_rx_descs(sc, rxq);
   7731 			break;
   7732 		}
   7733 
   7734 #ifdef WM_EVENT_COUNTERS
   7735 		xname = device_xname(sc->sc_dev);
   7736 
   7737 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7738 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7739 
   7740 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7741 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7742 #endif /* WM_EVENT_COUNTERS */
   7743 
   7744 		rx_done++;
   7745 	}
   7746 	if (error)
   7747 		goto fail_2;
   7748 
   7749 	return 0;
   7750 
   7751  fail_2:
   7752 	for (i = 0; i < rx_done; i++) {
   7753 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7754 		wm_free_rx_buffer(sc, rxq);
   7755 		wm_free_rx_descs(sc, rxq);
   7756 		if (rxq->rxq_lock)
   7757 			mutex_obj_free(rxq->rxq_lock);
   7758 	}
   7759  fail_1:
   7760 	for (i = 0; i < tx_done; i++) {
   7761 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7762 		pcq_destroy(txq->txq_interq);
   7763 		wm_free_tx_buffer(sc, txq);
   7764 		wm_free_tx_descs(sc, txq);
   7765 		if (txq->txq_lock)
   7766 			mutex_obj_free(txq->txq_lock);
   7767 	}
   7768 
   7769 	kmem_free(sc->sc_queue,
   7770 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7771  fail_0:
   7772 	return error;
   7773 }
   7774 
   7775 /*
    7776  * wm_free_txrx_queues:
   7777  *	Free {tx,rx}descs and {tx,rx} buffers
   7778  */
   7779 static void
   7780 wm_free_txrx_queues(struct wm_softc *sc)
   7781 {
   7782 	int i;
   7783 
   7784 	for (i = 0; i < sc->sc_nqueues; i++) {
   7785 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7786 
   7787 #ifdef WM_EVENT_COUNTERS
   7788 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7789 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7790 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7791 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7792 #endif /* WM_EVENT_COUNTERS */
   7793 
   7794 		wm_free_rx_buffer(sc, rxq);
   7795 		wm_free_rx_descs(sc, rxq);
   7796 		if (rxq->rxq_lock)
   7797 			mutex_obj_free(rxq->rxq_lock);
   7798 	}
   7799 
   7800 	for (i = 0; i < sc->sc_nqueues; i++) {
   7801 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7802 		struct mbuf *m;
   7803 #ifdef WM_EVENT_COUNTERS
   7804 		int j;
   7805 
   7806 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7807 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7808 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7809 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7810 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7811 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7812 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7813 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7814 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7815 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7816 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7817 
   7818 		for (j = 0; j < WM_NTXSEGS; j++)
   7819 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7820 
   7821 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7822 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7823 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7824 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7825 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7826 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7827 #endif /* WM_EVENT_COUNTERS */
   7828 
   7829 		/* Drain txq_interq */
   7830 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7831 			m_freem(m);
   7832 		pcq_destroy(txq->txq_interq);
   7833 
   7834 		wm_free_tx_buffer(sc, txq);
   7835 		wm_free_tx_descs(sc, txq);
   7836 		if (txq->txq_lock)
   7837 			mutex_obj_free(txq->txq_lock);
   7838 	}
   7839 
   7840 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7841 }
   7842 
   7843 static void
   7844 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7845 {
   7846 
   7847 	KASSERT(mutex_owned(txq->txq_lock));
   7848 
   7849 	/* Initialize the transmit descriptor ring. */
   7850 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7851 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7852 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7853 	txq->txq_free = WM_NTXDESC(txq);
   7854 	txq->txq_next = 0;
   7855 }
   7856 
   7857 static void
   7858 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7859     struct wm_txqueue *txq)
   7860 {
   7861 
   7862 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7863 		device_xname(sc->sc_dev), __func__));
   7864 	KASSERT(mutex_owned(txq->txq_lock));
   7865 
   7866 	if (sc->sc_type < WM_T_82543) {
   7867 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7868 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7869 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7870 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7871 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7872 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7873 	} else {
   7874 		int qid = wmq->wmq_id;
   7875 
   7876 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7877 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7878 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7879 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7880 
   7881 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7882 			/*
   7883 			 * Don't write TDT before TCTL.EN is set.
    7884 			 * See the datasheet.
   7885 			 */
   7886 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7887 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7888 			    | TXDCTL_WTHRESH(0));
   7889 		else {
   7890 			/* XXX should update with AIM? */
   7891 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7892 			if (sc->sc_type >= WM_T_82540) {
   7893 				/* Should be the same */
   7894 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7895 			}
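			/*
			 * The division by 4 presumably converts the ITR
			 * value (256 ns units) into TIDV/TADV units
			 * (1.024 us).
			 */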
   7896 
   7897 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7898 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7899 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7900 		}
   7901 	}
   7902 }
   7903 
   7904 static void
   7905 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7906 {
   7907 	int i;
   7908 
   7909 	KASSERT(mutex_owned(txq->txq_lock));
   7910 
   7911 	/* Initialize the transmit job descriptors. */
   7912 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7913 		txq->txq_soft[i].txs_mbuf = NULL;
   7914 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7915 	txq->txq_snext = 0;
   7916 	txq->txq_sdirty = 0;
   7917 }
   7918 
   7919 static void
   7920 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7921     struct wm_txqueue *txq)
   7922 {
   7923 
   7924 	KASSERT(mutex_owned(txq->txq_lock));
   7925 
   7926 	/*
   7927 	 * Set up some register offsets that are different between
   7928 	 * the i82542 and the i82543 and later chips.
   7929 	 */
   7930 	if (sc->sc_type < WM_T_82543)
   7931 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7932 	else
   7933 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7934 
   7935 	wm_init_tx_descs(sc, txq);
   7936 	wm_init_tx_regs(sc, wmq, txq);
   7937 	wm_init_tx_buffer(sc, txq);
   7938 
    7939 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
   7940 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7941 
   7942 	txq->txq_sending = false;
   7943 }
   7944 
   7945 static void
   7946 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7947     struct wm_rxqueue *rxq)
   7948 {
   7949 
   7950 	KASSERT(mutex_owned(rxq->rxq_lock));
   7951 
   7952 	/*
   7953 	 * Initialize the receive descriptor and receive job
   7954 	 * descriptor rings.
   7955 	 */
   7956 	if (sc->sc_type < WM_T_82543) {
   7957 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7958 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7959 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7960 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7961 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7962 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7963 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7964 
   7965 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7966 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7967 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7968 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7969 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7970 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7971 	} else {
   7972 		int qid = wmq->wmq_id;
   7973 
   7974 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7975 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7976 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7977 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7978 
   7979 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7980 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7981 				panic("%s: MCLBYTES %d unsupported for 82575 "
   7982 				    "or higher\n", __func__, MCLBYTES);
   7983 
   7984 			/*
   7985 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   7986 			 * only.
   7987 			 */
   7988 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7989 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7990 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
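			/*
			 * Example (illustrative): with the standard
			 * MCLBYTES of 2048 and the usual 1 KB buffer
			 * granularity (SRRCTL_BSIZEPKT_SHIFT = 10, an
			 * assumption), this programs a 2 KB receive
			 * buffer size.
			 */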
   7991 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7992 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7993 			    | RXDCTL_WTHRESH(1));
   7994 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7995 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7996 		} else {
   7997 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7998 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7999 			/* XXX should update with AIM? */
   8000 			CSR_WRITE(sc, WMREG_RDTR,
   8001 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    8002 			/* MUST be the same */
   8003 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
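			/*
			 * The division by 4 presumably converts the ITR
			 * value (256 ns units) into RDTR/RADV units
			 * (1.024 us).
			 */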
   8004 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   8005 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   8006 		}
   8007 	}
   8008 }
   8009 
   8010 static int
   8011 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8012 {
   8013 	struct wm_rxsoft *rxs;
   8014 	int error, i;
   8015 
   8016 	KASSERT(mutex_owned(rxq->rxq_lock));
   8017 
   8018 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8019 		rxs = &rxq->rxq_soft[i];
   8020 		if (rxs->rxs_mbuf == NULL) {
   8021 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8022 				log(LOG_ERR, "%s: unable to allocate or map "
   8023 				    "rx buffer %d, error = %d\n",
   8024 				    device_xname(sc->sc_dev), i, error);
   8025 				/*
   8026 				 * XXX Should attempt to run with fewer receive
   8027 				 * XXX buffers instead of just failing.
   8028 				 */
   8029 				wm_rxdrain(rxq);
   8030 				return ENOMEM;
   8031 			}
   8032 		} else {
   8033 			/*
   8034 			 * For 82575 and 82576, the RX descriptors must be
   8035 			 * initialized after the setting of RCTL.EN in
   8036 			 * wm_set_filter()
   8037 			 */
   8038 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8039 				wm_init_rxdesc(rxq, i);
   8040 		}
   8041 	}
   8042 	rxq->rxq_ptr = 0;
   8043 	rxq->rxq_discard = 0;
   8044 	WM_RXCHAIN_RESET(rxq);
   8045 
   8046 	return 0;
   8047 }
   8048 
   8049 static int
   8050 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8051     struct wm_rxqueue *rxq)
   8052 {
   8053 
   8054 	KASSERT(mutex_owned(rxq->rxq_lock));
   8055 
   8056 	/*
   8057 	 * Set up some register offsets that are different between
   8058 	 * the i82542 and the i82543 and later chips.
   8059 	 */
   8060 	if (sc->sc_type < WM_T_82543)
   8061 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8062 	else
   8063 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8064 
   8065 	wm_init_rx_regs(sc, wmq, rxq);
   8066 	return wm_init_rx_buffer(sc, rxq);
   8067 }
   8068 
   8069 /*
    8070  * wm_init_txrx_queues:
   8071  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8072  */
   8073 static int
   8074 wm_init_txrx_queues(struct wm_softc *sc)
   8075 {
   8076 	int i, error = 0;
   8077 
   8078 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8079 		device_xname(sc->sc_dev), __func__));
   8080 
   8081 	for (i = 0; i < sc->sc_nqueues; i++) {
   8082 		struct wm_queue *wmq = &sc->sc_queue[i];
   8083 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8084 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8085 
   8086 		/*
   8087 		 * TODO
    8088 		 * Currently, we use a constant value instead of AIM.
    8089 		 * Furthermore, the interrupt interval of the multiqueue
    8090 		 * path, which uses polling mode, is lower than the default
    8091 		 * value. More tuning and AIM support are required.
   8092 		 */
   8093 		if (wm_is_using_multiqueue(sc))
   8094 			wmq->wmq_itr = 50;
   8095 		else
   8096 			wmq->wmq_itr = sc->sc_itr_init;
   8097 		wmq->wmq_set_itr = true;
   8098 
   8099 		mutex_enter(txq->txq_lock);
   8100 		wm_init_tx_queue(sc, wmq, txq);
   8101 		mutex_exit(txq->txq_lock);
   8102 
   8103 		mutex_enter(rxq->rxq_lock);
   8104 		error = wm_init_rx_queue(sc, wmq, rxq);
   8105 		mutex_exit(rxq->rxq_lock);
   8106 		if (error)
   8107 			break;
   8108 	}
   8109 
   8110 	return error;
   8111 }
   8112 
   8113 /*
   8114  * wm_tx_offload:
   8115  *
   8116  *	Set up TCP/IP checksumming parameters for the
   8117  *	specified packet.
   8118  */
   8119 static void
   8120 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8121     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8122 {
   8123 	struct mbuf *m0 = txs->txs_mbuf;
   8124 	struct livengood_tcpip_ctxdesc *t;
   8125 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8126 	uint32_t ipcse;
   8127 	struct ether_header *eh;
   8128 	int offset, iphl;
   8129 	uint8_t fields;
   8130 
   8131 	/*
   8132 	 * XXX It would be nice if the mbuf pkthdr had offset
   8133 	 * fields for the protocol headers.
   8134 	 */
   8135 
   8136 	eh = mtod(m0, struct ether_header *);
   8137 	switch (htons(eh->ether_type)) {
   8138 	case ETHERTYPE_IP:
   8139 	case ETHERTYPE_IPV6:
   8140 		offset = ETHER_HDR_LEN;
   8141 		break;
   8142 
   8143 	case ETHERTYPE_VLAN:
   8144 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8145 		break;
   8146 
   8147 	default:
   8148 		/* Don't support this protocol or encapsulation. */
   8149 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8150 		txq->txq_last_hw_ipcs = 0;
   8151 		txq->txq_last_hw_tucs = 0;
   8152 		*fieldsp = 0;
   8153 		*cmdp = 0;
   8154 		return;
   8155 	}
   8156 
   8157 	if ((m0->m_pkthdr.csum_flags &
   8158 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8159 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8160 	} else
   8161 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8162 
   8163 	ipcse = offset + iphl - 1;
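	/*
	 * Example (illustrative): for an untagged IPv4 packet,
	 * offset = ETHER_HDR_LEN (14) and iphl = 20, so
	 * ipcse = 14 + 20 - 1 = 33, the offset of the last byte
	 * covered by the IP header checksum.
	 */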
   8164 
   8165 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8166 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8167 	seg = 0;
   8168 	fields = 0;
   8169 
   8170 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8171 		int hlen = offset + iphl;
   8172 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8173 
   8174 		if (__predict_false(m0->m_len <
   8175 				    (hlen + sizeof(struct tcphdr)))) {
   8176 			/*
   8177 			 * TCP/IP headers are not in the first mbuf; we need
   8178 			 * to do this the slow and painful way. Let's just
   8179 			 * hope this doesn't happen very often.
   8180 			 */
   8181 			struct tcphdr th;
   8182 
   8183 			WM_Q_EVCNT_INCR(txq, tsopain);
   8184 
   8185 			m_copydata(m0, hlen, sizeof(th), &th);
   8186 			if (v4) {
   8187 				struct ip ip;
   8188 
   8189 				m_copydata(m0, offset, sizeof(ip), &ip);
   8190 				ip.ip_len = 0;
   8191 				m_copyback(m0,
   8192 				    offset + offsetof(struct ip, ip_len),
   8193 				    sizeof(ip.ip_len), &ip.ip_len);
   8194 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8195 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8196 			} else {
   8197 				struct ip6_hdr ip6;
   8198 
   8199 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8200 				ip6.ip6_plen = 0;
   8201 				m_copyback(m0,
   8202 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8203 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8204 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8205 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8206 			}
   8207 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8208 			    sizeof(th.th_sum), &th.th_sum);
   8209 
   8210 			hlen += th.th_off << 2;
   8211 		} else {
   8212 			/*
   8213 			 * TCP/IP headers are in the first mbuf; we can do
   8214 			 * this the easy way.
   8215 			 */
   8216 			struct tcphdr *th;
   8217 
   8218 			if (v4) {
   8219 				struct ip *ip =
   8220 				    (void *)(mtod(m0, char *) + offset);
   8221 				th = (void *)(mtod(m0, char *) + hlen);
   8222 
   8223 				ip->ip_len = 0;
   8224 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8225 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8226 			} else {
   8227 				struct ip6_hdr *ip6 =
   8228 				    (void *)(mtod(m0, char *) + offset);
   8229 				th = (void *)(mtod(m0, char *) + hlen);
   8230 
   8231 				ip6->ip6_plen = 0;
   8232 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8233 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8234 			}
   8235 			hlen += th->th_off << 2;
   8236 		}
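		/*
		 * At this point th_sum holds the pseudo-header checksum
		 * computed with a zero length field, as the hardware
		 * expects for TSO, and hlen covers the full
		 * Ethernet/IP/TCP header.
		 */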
   8237 
   8238 		if (v4) {
   8239 			WM_Q_EVCNT_INCR(txq, tso);
   8240 			cmdlen |= WTX_TCPIP_CMD_IP;
   8241 		} else {
   8242 			WM_Q_EVCNT_INCR(txq, tso6);
   8243 			ipcse = 0;
   8244 		}
   8245 		cmd |= WTX_TCPIP_CMD_TSE;
   8246 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8247 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8248 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8249 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8250 	}
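	/*
	 * Example (illustrative): for an IPv4 TSO frame with no IP or
	 * TCP options, hlen = 14 (Ethernet) + 20 (IP) + 20 (TCP) = 54,
	 * so the context descriptor carries HDRLEN(54), the MSS from
	 * m0->m_pkthdr.segsz, and a payload length of
	 * m0->m_pkthdr.len - 54 in cmdlen.
	 */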
   8251 
   8252 	/*
   8253 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8254 	 * offload feature, if we load the context descriptor, we
   8255 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8256 	 */
   8257 
   8258 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8259 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8260 	    WTX_TCPIP_IPCSE(ipcse);
   8261 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8262 		WM_Q_EVCNT_INCR(txq, ipsum);
   8263 		fields |= WTX_IXSM;
   8264 	}
   8265 
   8266 	offset += iphl;
   8267 
   8268 	if (m0->m_pkthdr.csum_flags &
   8269 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8270 		WM_Q_EVCNT_INCR(txq, tusum);
   8271 		fields |= WTX_TXSM;
   8272 		tucs = WTX_TCPIP_TUCSS(offset) |
   8273 		    WTX_TCPIP_TUCSO(offset +
   8274 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8275 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8276 	} else if ((m0->m_pkthdr.csum_flags &
   8277 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8278 		WM_Q_EVCNT_INCR(txq, tusum6);
   8279 		fields |= WTX_TXSM;
   8280 		tucs = WTX_TCPIP_TUCSS(offset) |
   8281 		    WTX_TCPIP_TUCSO(offset +
   8282 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8283 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8284 	} else {
   8285 		/* Just initialize it to a valid TCP context. */
   8286 		tucs = WTX_TCPIP_TUCSS(offset) |
   8287 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8288 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8289 	}
   8290 
   8291 	*cmdp = cmd;
   8292 	*fieldsp = fields;
   8293 
   8294 	/*
    8295 	 * We don't have to write a context descriptor for every packet,
    8296 	 * except on the 82574. For the 82574, we must write a context
    8297 	 * descriptor for every packet when we use two descriptor queues.
   8298 	 *
   8299 	 * The 82574L can only remember the *last* context used
    8300 	 * regardless of the queue that it was used for.  We cannot reuse
   8301 	 * contexts on this hardware platform and must generate a new
   8302 	 * context every time.  82574L hardware spec, section 7.2.6,
   8303 	 * second note.
   8304 	 */
   8305 	if (sc->sc_nqueues < 2) {
   8306 		/*
    8307 		 * Setting up a new checksum offload context for every
    8308 		 * frame takes a lot of processing time for the hardware.
    8309 		 * This also reduces performance a lot for small-sized
    8310 		 * frames, so avoid it if the driver can reuse a previously
    8311 		 * configured checksum offload context.
    8312 		 * For TSO, in theory we could reuse the same TSO context
    8313 		 * only if the frame has the same type (IP/TCP) and the
    8314 		 * same MSS. However, checking whether a frame has the same
    8315 		 * IP/TCP structure is hard, so just ignore that and always
    8316 		 * re-establish a new TSO context.
   8317 		 */
   8318 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8319 		    == 0) {
   8320 			if (txq->txq_last_hw_cmd == cmd &&
   8321 			    txq->txq_last_hw_fields == fields &&
   8322 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8323 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8324 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8325 				return;
   8326 			}
   8327 		}
   8328 
   8329 		txq->txq_last_hw_cmd = cmd;
   8330 		txq->txq_last_hw_fields = fields;
   8331 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8332 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8333 	}
   8334 
   8335 	/* Fill in the context descriptor. */
   8336 	t = (struct livengood_tcpip_ctxdesc *)
   8337 	    &txq->txq_descs[txq->txq_next];
   8338 	t->tcpip_ipcs = htole32(ipcs);
   8339 	t->tcpip_tucs = htole32(tucs);
   8340 	t->tcpip_cmdlen = htole32(cmdlen);
   8341 	t->tcpip_seg = htole32(seg);
   8342 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8343 
   8344 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8345 	txs->txs_ndesc++;
   8346 }
   8347 
   8348 static inline int
   8349 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8350 {
   8351 	struct wm_softc *sc = ifp->if_softc;
   8352 	u_int cpuid = cpu_index(curcpu());
   8353 
   8354 	/*
    8355 	 * Currently, a simple distribution strategy.
    8356 	 * TODO:
    8357 	 * Distribute by flow ID (RSS hash value).
   8358 	 */
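	/*
	 * Example (illustrative): with ncpu = 8, sc_nqueues = 4 and
	 * sc_affinity_offset = 2, a thread running on CPU 1 maps to
	 * ((1 + 8 - 2) % 8) % 4 = 7 % 4 = queue 3.
	 */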
   8359 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8360 }
   8361 
   8362 static inline bool
   8363 wm_linkdown_discard(struct wm_txqueue *txq)
   8364 {
   8365 
   8366 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8367 		return true;
   8368 
   8369 	return false;
   8370 }
   8371 
   8372 /*
   8373  * wm_start:		[ifnet interface function]
   8374  *
   8375  *	Start packet transmission on the interface.
   8376  */
   8377 static void
   8378 wm_start(struct ifnet *ifp)
   8379 {
   8380 	struct wm_softc *sc = ifp->if_softc;
   8381 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8382 
   8383 #ifdef WM_MPSAFE
   8384 	KASSERT(if_is_mpsafe(ifp));
   8385 #endif
   8386 	/*
   8387 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8388 	 */
   8389 
   8390 	mutex_enter(txq->txq_lock);
   8391 	if (!txq->txq_stopping)
   8392 		wm_start_locked(ifp);
   8393 	mutex_exit(txq->txq_lock);
   8394 }
   8395 
   8396 static void
   8397 wm_start_locked(struct ifnet *ifp)
   8398 {
   8399 	struct wm_softc *sc = ifp->if_softc;
   8400 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8401 
   8402 	wm_send_common_locked(ifp, txq, false);
   8403 }
   8404 
   8405 static int
   8406 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8407 {
   8408 	int qid;
   8409 	struct wm_softc *sc = ifp->if_softc;
   8410 	struct wm_txqueue *txq;
   8411 
   8412 	qid = wm_select_txqueue(ifp, m);
   8413 	txq = &sc->sc_queue[qid].wmq_txq;
   8414 
   8415 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8416 		m_freem(m);
   8417 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8418 		return ENOBUFS;
   8419 	}
   8420 
   8421 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8422 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8423 	if (m->m_flags & M_MCAST)
   8424 		if_statinc_ref(nsr, if_omcasts);
   8425 	IF_STAT_PUTREF(ifp);
   8426 
   8427 	if (mutex_tryenter(txq->txq_lock)) {
   8428 		if (!txq->txq_stopping)
   8429 			wm_transmit_locked(ifp, txq);
   8430 		mutex_exit(txq->txq_lock);
   8431 	}
   8432 
   8433 	return 0;
   8434 }
   8435 
   8436 static void
   8437 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8438 {
   8439 
   8440 	wm_send_common_locked(ifp, txq, true);
   8441 }
   8442 
   8443 static void
   8444 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8445     bool is_transmit)
   8446 {
   8447 	struct wm_softc *sc = ifp->if_softc;
   8448 	struct mbuf *m0;
   8449 	struct wm_txsoft *txs;
   8450 	bus_dmamap_t dmamap;
   8451 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8452 	bus_addr_t curaddr;
   8453 	bus_size_t seglen, curlen;
   8454 	uint32_t cksumcmd;
   8455 	uint8_t cksumfields;
   8456 	bool remap = true;
   8457 
   8458 	KASSERT(mutex_owned(txq->txq_lock));
   8459 	KASSERT(!txq->txq_stopping);
   8460 
   8461 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8462 		return;
   8463 
   8464 	if (__predict_false(wm_linkdown_discard(txq))) {
   8465 		do {
   8466 			if (is_transmit)
   8467 				m0 = pcq_get(txq->txq_interq);
   8468 			else
   8469 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8470 			/*
    8471 			 * Increment the success counter even though the
    8472 			 * packet is discarded because the PHY link is down.
   8473 			 */
   8474 			if (m0 != NULL) {
   8475 				if_statinc(ifp, if_opackets);
   8476 				m_freem(m0);
   8477 			}
   8478 		} while (m0 != NULL);
   8479 		return;
   8480 	}
   8481 
   8482 	/* Remember the previous number of free descriptors. */
   8483 	ofree = txq->txq_free;
   8484 
   8485 	/*
   8486 	 * Loop through the send queue, setting up transmit descriptors
   8487 	 * until we drain the queue, or use up all available transmit
   8488 	 * descriptors.
   8489 	 */
   8490 	for (;;) {
   8491 		m0 = NULL;
   8492 
   8493 		/* Get a work queue entry. */
   8494 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8495 			wm_txeof(txq, UINT_MAX);
   8496 			if (txq->txq_sfree == 0) {
   8497 				DPRINTF(sc, WM_DEBUG_TX,
   8498 				    ("%s: TX: no free job descriptors\n",
   8499 					device_xname(sc->sc_dev)));
   8500 				WM_Q_EVCNT_INCR(txq, txsstall);
   8501 				break;
   8502 			}
   8503 		}
   8504 
   8505 		/* Grab a packet off the queue. */
   8506 		if (is_transmit)
   8507 			m0 = pcq_get(txq->txq_interq);
   8508 		else
   8509 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8510 		if (m0 == NULL)
   8511 			break;
   8512 
   8513 		DPRINTF(sc, WM_DEBUG_TX,
   8514 		    ("%s: TX: have packet to transmit: %p\n",
   8515 			device_xname(sc->sc_dev), m0));
   8516 
   8517 		txs = &txq->txq_soft[txq->txq_snext];
   8518 		dmamap = txs->txs_dmamap;
   8519 
   8520 		use_tso = (m0->m_pkthdr.csum_flags &
   8521 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8522 
   8523 		/*
   8524 		 * So says the Linux driver:
   8525 		 * The controller does a simple calculation to make sure
   8526 		 * there is enough room in the FIFO before initiating the
   8527 		 * DMA for each buffer. The calc is:
   8528 		 *	4 = ceil(buffer len / MSS)
   8529 		 * To make sure we don't overrun the FIFO, adjust the max
   8530 		 * buffer len if the MSS drops.
   8531 		 */
   8532 		dmamap->dm_maxsegsz =
   8533 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8534 		    ? m0->m_pkthdr.segsz << 2
   8535 		    : WTX_MAX_LEN;
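		/*
		 * Example (illustrative): with TSO and an MSS of 1448,
		 * segsz << 2 = 5792; if that is below WTX_MAX_LEN, each
		 * DMA segment is capped at 5792 bytes so the hardware's
		 * 4-MSS-per-buffer assumption above always holds.
		 * Non-TSO packets keep the full WTX_MAX_LEN cap.
		 */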
   8536 
   8537 		/*
   8538 		 * Load the DMA map.  If this fails, the packet either
   8539 		 * didn't fit in the allotted number of segments, or we
   8540 		 * were short on resources.  For the too-many-segments
   8541 		 * case, we simply report an error and drop the packet,
   8542 		 * since we can't sanely copy a jumbo packet to a single
   8543 		 * buffer.
   8544 		 */
   8545 retry:
   8546 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8547 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8548 		if (__predict_false(error)) {
   8549 			if (error == EFBIG) {
   8550 				if (remap == true) {
   8551 					struct mbuf *m;
   8552 
   8553 					remap = false;
   8554 					m = m_defrag(m0, M_NOWAIT);
   8555 					if (m != NULL) {
   8556 						WM_Q_EVCNT_INCR(txq, defrag);
   8557 						m0 = m;
   8558 						goto retry;
   8559 					}
   8560 				}
   8561 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8562 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8563 				    "DMA segments, dropping...\n",
   8564 				    device_xname(sc->sc_dev));
   8565 				wm_dump_mbuf_chain(sc, m0);
   8566 				m_freem(m0);
   8567 				continue;
   8568 			}
   8569 			/* Short on resources, just stop for now. */
   8570 			DPRINTF(sc, WM_DEBUG_TX,
   8571 			    ("%s: TX: dmamap load failed: %d\n",
   8572 				device_xname(sc->sc_dev), error));
   8573 			break;
   8574 		}
   8575 
   8576 		segs_needed = dmamap->dm_nsegs;
   8577 		if (use_tso) {
   8578 			/* For sentinel descriptor; see below. */
   8579 			segs_needed++;
   8580 		}
   8581 
   8582 		/*
   8583 		 * Ensure we have enough descriptors free to describe
   8584 		 * the packet. Note, we always reserve one descriptor
   8585 		 * at the end of the ring due to the semantics of the
   8586 		 * TDT register, plus one more in the event we need
   8587 		 * to load offload context.
   8588 		 */
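		/*
		 * Example (illustrative): a packet mapped into 5 DMA
		 * segments needs segs_needed = 5 (6 with the TSO
		 * sentinel), so it is only queued while
		 * txq_free >= segs_needed + 2.
		 */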
   8589 		if (segs_needed > txq->txq_free - 2) {
   8590 			/*
   8591 			 * Not enough free descriptors to transmit this
   8592 			 * packet.  We haven't committed anything yet,
   8593 			 * so just unload the DMA map, put the packet
    8594 			 * back on the queue, and punt. Notify the upper
   8595 			 * layer that there are no more slots left.
   8596 			 */
   8597 			DPRINTF(sc, WM_DEBUG_TX,
   8598 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8599 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8600 				segs_needed, txq->txq_free - 1));
   8601 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8602 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8603 			WM_Q_EVCNT_INCR(txq, txdstall);
   8604 			break;
   8605 		}
   8606 
   8607 		/*
   8608 		 * Check for 82547 Tx FIFO bug. We need to do this
   8609 		 * once we know we can transmit the packet, since we
   8610 		 * do some internal FIFO space accounting here.
   8611 		 */
   8612 		if (sc->sc_type == WM_T_82547 &&
   8613 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8614 			DPRINTF(sc, WM_DEBUG_TX,
   8615 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8616 				device_xname(sc->sc_dev)));
   8617 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8618 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8619 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8620 			break;
   8621 		}
   8622 
   8623 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8624 
   8625 		DPRINTF(sc, WM_DEBUG_TX,
   8626 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8627 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8628 
   8629 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8630 
   8631 		/*
   8632 		 * Store a pointer to the packet so that we can free it
   8633 		 * later.
   8634 		 *
   8635 		 * Initially, we consider the number of descriptors the
   8636 		 * packet uses the number of DMA segments.  This may be
   8637 		 * incremented by 1 if we do checksum offload (a descriptor
   8638 		 * is used to set the checksum context).
   8639 		 */
   8640 		txs->txs_mbuf = m0;
   8641 		txs->txs_firstdesc = txq->txq_next;
   8642 		txs->txs_ndesc = segs_needed;
   8643 
   8644 		/* Set up offload parameters for this packet. */
   8645 		if (m0->m_pkthdr.csum_flags &
   8646 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8647 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8648 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8649 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8650 		} else {
   8651 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8652 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8653 			cksumcmd = 0;
   8654 			cksumfields = 0;
   8655 		}
   8656 
   8657 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8658 
   8659 		/* Sync the DMA map. */
   8660 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8661 		    BUS_DMASYNC_PREWRITE);
   8662 
   8663 		/* Initialize the transmit descriptor. */
   8664 		for (nexttx = txq->txq_next, seg = 0;
   8665 		     seg < dmamap->dm_nsegs; seg++) {
   8666 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8667 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8668 			     seglen != 0;
   8669 			     curaddr += curlen, seglen -= curlen,
   8670 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8671 				curlen = seglen;
   8672 
   8673 				/*
   8674 				 * So says the Linux driver:
   8675 				 * Work around for premature descriptor
   8676 				 * write-backs in TSO mode.  Append a
   8677 				 * 4-byte sentinel descriptor.
   8678 				 */
   8679 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8680 				    curlen > 8)
   8681 					curlen -= 4;
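				/*
				 * Example (illustrative): a final
				 * 1024-byte chunk is emitted as a
				 * 1020-byte descriptor; the remaining
				 * 4 bytes become the sentinel descriptor
				 * on the next loop iteration.
				 */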
   8682 
   8683 				wm_set_dma_addr(
   8684 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8685 				txq->txq_descs[nexttx].wtx_cmdlen
   8686 				    = htole32(cksumcmd | curlen);
   8687 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8688 				    = 0;
   8689 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8690 				    = cksumfields;
    8691 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8692 				lasttx = nexttx;
   8693 
   8694 				DPRINTF(sc, WM_DEBUG_TX,
   8695 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8696 					"len %#04zx\n",
   8697 					device_xname(sc->sc_dev), nexttx,
   8698 					(uint64_t)curaddr, curlen));
   8699 			}
   8700 		}
   8701 
   8702 		KASSERT(lasttx != -1);
   8703 
   8704 		/*
   8705 		 * Set up the command byte on the last descriptor of
   8706 		 * the packet. If we're in the interrupt delay window,
   8707 		 * delay the interrupt.
   8708 		 */
   8709 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8710 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8711 
   8712 		/*
   8713 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8714 		 * up the descriptor to encapsulate the packet for us.
   8715 		 *
   8716 		 * This is only valid on the last descriptor of the packet.
   8717 		 */
   8718 		if (vlan_has_tag(m0)) {
   8719 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8720 			    htole32(WTX_CMD_VLE);
   8721 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8722 			    = htole16(vlan_get_tag(m0));
   8723 		}
   8724 
   8725 		txs->txs_lastdesc = lasttx;
   8726 
   8727 		DPRINTF(sc, WM_DEBUG_TX,
   8728 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8729 			device_xname(sc->sc_dev),
   8730 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8731 
   8732 		/* Sync the descriptors we're using. */
   8733 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8734 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8735 
   8736 		/* Give the packet to the chip. */
   8737 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8738 
   8739 		DPRINTF(sc, WM_DEBUG_TX,
   8740 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8741 
   8742 		DPRINTF(sc, WM_DEBUG_TX,
   8743 		    ("%s: TX: finished transmitting packet, job %d\n",
   8744 			device_xname(sc->sc_dev), txq->txq_snext));
   8745 
   8746 		/* Advance the tx pointer. */
   8747 		txq->txq_free -= txs->txs_ndesc;
   8748 		txq->txq_next = nexttx;
   8749 
   8750 		txq->txq_sfree--;
   8751 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8752 
   8753 		/* Pass the packet to any BPF listeners. */
   8754 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8755 	}
   8756 
   8757 	if (m0 != NULL) {
   8758 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8759 		WM_Q_EVCNT_INCR(txq, descdrop);
   8760 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8761 			__func__));
   8762 		m_freem(m0);
   8763 	}
   8764 
   8765 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8766 		/* No more slots; notify upper layer. */
   8767 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8768 	}
   8769 
   8770 	if (txq->txq_free != ofree) {
   8771 		/* Set a watchdog timer in case the chip flakes out. */
   8772 		txq->txq_lastsent = time_uptime;
   8773 		txq->txq_sending = true;
   8774 	}
   8775 }
   8776 
   8777 /*
   8778  * wm_nq_tx_offload:
   8779  *
   8780  *	Set up TCP/IP checksumming parameters for the
   8781  *	specified packet, for NEWQUEUE devices
   8782  */
   8783 static void
   8784 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8785     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8786 {
   8787 	struct mbuf *m0 = txs->txs_mbuf;
   8788 	uint32_t vl_len, mssidx, cmdc;
   8789 	struct ether_header *eh;
   8790 	int offset, iphl;
   8791 
   8792 	/*
   8793 	 * XXX It would be nice if the mbuf pkthdr had offset
   8794 	 * fields for the protocol headers.
   8795 	 */
   8796 	*cmdlenp = 0;
   8797 	*fieldsp = 0;
   8798 
   8799 	eh = mtod(m0, struct ether_header *);
   8800 	switch (htons(eh->ether_type)) {
   8801 	case ETHERTYPE_IP:
   8802 	case ETHERTYPE_IPV6:
   8803 		offset = ETHER_HDR_LEN;
   8804 		break;
   8805 
   8806 	case ETHERTYPE_VLAN:
   8807 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8808 		break;
   8809 
   8810 	default:
   8811 		/* Don't support this protocol or encapsulation. */
   8812 		*do_csum = false;
   8813 		return;
   8814 	}
   8815 	*do_csum = true;
   8816 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8817 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8818 
   8819 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8820 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8821 
   8822 	if ((m0->m_pkthdr.csum_flags &
   8823 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8824 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8825 	} else {
   8826 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8827 	}
   8828 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8829 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
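	/*
	 * Example (illustrative): for an untagged IPv4 frame with no IP
	 * options, vl_len now packs MACLEN = 14 and IPLEN = 20 into the
	 * advanced context descriptor's length fields.
	 */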
   8830 
   8831 	if (vlan_has_tag(m0)) {
   8832 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8833 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8834 		*cmdlenp |= NQTX_CMD_VLE;
   8835 	}
   8836 
   8837 	mssidx = 0;
   8838 
   8839 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8840 		int hlen = offset + iphl;
   8841 		int tcp_hlen;
   8842 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8843 
   8844 		if (__predict_false(m0->m_len <
   8845 				    (hlen + sizeof(struct tcphdr)))) {
   8846 			/*
   8847 			 * TCP/IP headers are not in the first mbuf; we need
   8848 			 * to do this the slow and painful way. Let's just
   8849 			 * hope this doesn't happen very often.
   8850 			 */
   8851 			struct tcphdr th;
   8852 
   8853 			WM_Q_EVCNT_INCR(txq, tsopain);
   8854 
   8855 			m_copydata(m0, hlen, sizeof(th), &th);
   8856 			if (v4) {
   8857 				struct ip ip;
   8858 
   8859 				m_copydata(m0, offset, sizeof(ip), &ip);
   8860 				ip.ip_len = 0;
   8861 				m_copyback(m0,
   8862 				    offset + offsetof(struct ip, ip_len),
   8863 				    sizeof(ip.ip_len), &ip.ip_len);
   8864 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8865 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8866 			} else {
   8867 				struct ip6_hdr ip6;
   8868 
   8869 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8870 				ip6.ip6_plen = 0;
   8871 				m_copyback(m0,
   8872 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8873 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8874 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8875 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8876 			}
   8877 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8878 			    sizeof(th.th_sum), &th.th_sum);
   8879 
   8880 			tcp_hlen = th.th_off << 2;
   8881 		} else {
   8882 			/*
   8883 			 * TCP/IP headers are in the first mbuf; we can do
   8884 			 * this the easy way.
   8885 			 */
   8886 			struct tcphdr *th;
   8887 
   8888 			if (v4) {
   8889 				struct ip *ip =
   8890 				    (void *)(mtod(m0, char *) + offset);
   8891 				th = (void *)(mtod(m0, char *) + hlen);
   8892 
   8893 				ip->ip_len = 0;
   8894 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8895 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8896 			} else {
   8897 				struct ip6_hdr *ip6 =
   8898 				    (void *)(mtod(m0, char *) + offset);
   8899 				th = (void *)(mtod(m0, char *) + hlen);
   8900 
   8901 				ip6->ip6_plen = 0;
   8902 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8903 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8904 			}
   8905 			tcp_hlen = th->th_off << 2;
   8906 		}
   8907 		hlen += tcp_hlen;
   8908 		*cmdlenp |= NQTX_CMD_TSE;
   8909 
   8910 		if (v4) {
   8911 			WM_Q_EVCNT_INCR(txq, tso);
   8912 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8913 		} else {
   8914 			WM_Q_EVCNT_INCR(txq, tso6);
   8915 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8916 		}
   8917 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8918 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8919 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8920 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8921 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8922 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
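		/*
		 * Example (illustrative): with an MSS of 1448 and a
		 * 20-byte TCP header, mssidx packs MSS(1448) and
		 * L4LEN(20) for the advanced TSO context descriptor.
		 */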
   8923 	} else {
   8924 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8925 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8926 	}
   8927 
   8928 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8929 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8930 		cmdc |= NQTXC_CMD_IP4;
   8931 	}
   8932 
   8933 	if (m0->m_pkthdr.csum_flags &
   8934 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8935 		WM_Q_EVCNT_INCR(txq, tusum);
   8936 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8937 			cmdc |= NQTXC_CMD_TCP;
   8938 		else
   8939 			cmdc |= NQTXC_CMD_UDP;
   8940 
   8941 		cmdc |= NQTXC_CMD_IP4;
   8942 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8943 	}
   8944 	if (m0->m_pkthdr.csum_flags &
   8945 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8946 		WM_Q_EVCNT_INCR(txq, tusum6);
   8947 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8948 			cmdc |= NQTXC_CMD_TCP;
   8949 		else
   8950 			cmdc |= NQTXC_CMD_UDP;
   8951 
   8952 		cmdc |= NQTXC_CMD_IP6;
   8953 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8954 	}
   8955 
   8956 	/*
    8957 	 * We don't have to write a context descriptor for every packet on
    8958 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    8959 	 * I354, I210 and I211. It is enough to write one per Tx queue for
    8960 	 * these controllers.
    8961 	 * Writing a context descriptor for every packet adds overhead,
    8962 	 * but it does not cause problems.
   8963 	 */
   8964 	/* Fill in the context descriptor. */
   8965 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8966 	    htole32(vl_len);
   8967 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8968 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8969 	    htole32(cmdc);
   8970 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8971 	    htole32(mssidx);
   8972 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8973 	DPRINTF(sc, WM_DEBUG_TX,
   8974 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8975 		txq->txq_next, 0, vl_len));
   8976 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8977 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8978 	txs->txs_ndesc++;
   8979 }
   8980 
   8981 /*
   8982  * wm_nq_start:		[ifnet interface function]
   8983  *
   8984  *	Start packet transmission on the interface for NEWQUEUE devices
   8985  */
   8986 static void
   8987 wm_nq_start(struct ifnet *ifp)
   8988 {
   8989 	struct wm_softc *sc = ifp->if_softc;
   8990 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8991 
   8992 #ifdef WM_MPSAFE
   8993 	KASSERT(if_is_mpsafe(ifp));
   8994 #endif
   8995 	/*
   8996 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8997 	 */
   8998 
   8999 	mutex_enter(txq->txq_lock);
   9000 	if (!txq->txq_stopping)
   9001 		wm_nq_start_locked(ifp);
   9002 	mutex_exit(txq->txq_lock);
   9003 }
   9004 
   9005 static void
   9006 wm_nq_start_locked(struct ifnet *ifp)
   9007 {
   9008 	struct wm_softc *sc = ifp->if_softc;
   9009 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9010 
   9011 	wm_nq_send_common_locked(ifp, txq, false);
   9012 }
   9013 
   9014 static int
   9015 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   9016 {
   9017 	int qid;
   9018 	struct wm_softc *sc = ifp->if_softc;
   9019 	struct wm_txqueue *txq;
   9020 
   9021 	qid = wm_select_txqueue(ifp, m);
   9022 	txq = &sc->sc_queue[qid].wmq_txq;
   9023 
   9024 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9025 		m_freem(m);
   9026 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9027 		return ENOBUFS;
   9028 	}
   9029 
   9030 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9031 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9032 	if (m->m_flags & M_MCAST)
   9033 		if_statinc_ref(nsr, if_omcasts);
   9034 	IF_STAT_PUTREF(ifp);
   9035 
   9036 	/*
    9037 	 * There are two situations in which this mutex_tryenter() can
    9038 	 * fail at run time:
    9039 	 *     (1) contention with the interrupt handler
    9040 	 *         (wm_txrxintr_msix())
    9041 	 *     (2) contention with the deferred if_start softint
    9042 	 *         (wm_handle_queue())
    9043 	 * In both cases, the last packet enqueued to txq->txq_interq
    9044 	 * is dequeued by wm_deferred_start_locked(), so the packet
    9045 	 * does not get stuck in the queue.
   9046 	 */
   9047 	if (mutex_tryenter(txq->txq_lock)) {
   9048 		if (!txq->txq_stopping)
   9049 			wm_nq_transmit_locked(ifp, txq);
   9050 		mutex_exit(txq->txq_lock);
   9051 	}
   9052 
   9053 	return 0;
   9054 }
   9055 
   9056 static void
   9057 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9058 {
   9059 
   9060 	wm_nq_send_common_locked(ifp, txq, true);
   9061 }
   9062 
   9063 static void
   9064 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9065     bool is_transmit)
   9066 {
   9067 	struct wm_softc *sc = ifp->if_softc;
   9068 	struct mbuf *m0;
   9069 	struct wm_txsoft *txs;
   9070 	bus_dmamap_t dmamap;
   9071 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9072 	bool do_csum, sent;
   9073 	bool remap = true;
   9074 
   9075 	KASSERT(mutex_owned(txq->txq_lock));
   9076 	KASSERT(!txq->txq_stopping);
   9077 
   9078 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9079 		return;
   9080 
   9081 	if (__predict_false(wm_linkdown_discard(txq))) {
   9082 		do {
   9083 			if (is_transmit)
   9084 				m0 = pcq_get(txq->txq_interq);
   9085 			else
   9086 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   9087 			/*
    9088 			 * Increment the success counter even though the
    9089 			 * packet is discarded because the PHY link is down.
   9090 			 */
   9091 			if (m0 != NULL) {
   9092 				if_statinc(ifp, if_opackets);
   9093 				m_freem(m0);
   9094 			}
   9095 		} while (m0 != NULL);
   9096 		return;
   9097 	}
   9098 
   9099 	sent = false;
   9100 
   9101 	/*
   9102 	 * Loop through the send queue, setting up transmit descriptors
   9103 	 * until we drain the queue, or use up all available transmit
   9104 	 * descriptors.
   9105 	 */
   9106 	for (;;) {
   9107 		m0 = NULL;
   9108 
   9109 		/* Get a work queue entry. */
   9110 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9111 			wm_txeof(txq, UINT_MAX);
   9112 			if (txq->txq_sfree == 0) {
   9113 				DPRINTF(sc, WM_DEBUG_TX,
   9114 				    ("%s: TX: no free job descriptors\n",
   9115 					device_xname(sc->sc_dev)));
   9116 				WM_Q_EVCNT_INCR(txq, txsstall);
   9117 				break;
   9118 			}
   9119 		}
   9120 
   9121 		/* Grab a packet off the queue. */
   9122 		if (is_transmit)
   9123 			m0 = pcq_get(txq->txq_interq);
   9124 		else
   9125 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9126 		if (m0 == NULL)
   9127 			break;
   9128 
   9129 		DPRINTF(sc, WM_DEBUG_TX,
   9130 		    ("%s: TX: have packet to transmit: %p\n",
   9131 		    device_xname(sc->sc_dev), m0));
   9132 
   9133 		txs = &txq->txq_soft[txq->txq_snext];
   9134 		dmamap = txs->txs_dmamap;
   9135 
   9136 		/*
   9137 		 * Load the DMA map.  If this fails, the packet either
   9138 		 * didn't fit in the allotted number of segments, or we
   9139 		 * were short on resources.  For the too-many-segments
   9140 		 * case, we simply report an error and drop the packet,
   9141 		 * since we can't sanely copy a jumbo packet to a single
   9142 		 * buffer.
   9143 		 */
   9144 retry:
   9145 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9146 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9147 		if (__predict_false(error)) {
   9148 			if (error == EFBIG) {
   9149 				if (remap == true) {
   9150 					struct mbuf *m;
   9151 
   9152 					remap = false;
   9153 					m = m_defrag(m0, M_NOWAIT);
   9154 					if (m != NULL) {
   9155 						WM_Q_EVCNT_INCR(txq, defrag);
   9156 						m0 = m;
   9157 						goto retry;
   9158 					}
   9159 				}
   9160 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9161 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9162 				    "DMA segments, dropping...\n",
   9163 				    device_xname(sc->sc_dev));
   9164 				wm_dump_mbuf_chain(sc, m0);
   9165 				m_freem(m0);
   9166 				continue;
   9167 			}
   9168 			/* Short on resources, just stop for now. */
   9169 			DPRINTF(sc, WM_DEBUG_TX,
   9170 			    ("%s: TX: dmamap load failed: %d\n",
   9171 				device_xname(sc->sc_dev), error));
   9172 			break;
   9173 		}
   9174 
   9175 		segs_needed = dmamap->dm_nsegs;
   9176 
   9177 		/*
   9178 		 * Ensure we have enough descriptors free to describe
   9179 		 * the packet. Note, we always reserve one descriptor
   9180 		 * at the end of the ring due to the semantics of the
   9181 		 * TDT register, plus one more in the event we need
   9182 		 * to load offload context.
   9183 		 */
   9184 		if (segs_needed > txq->txq_free - 2) {
   9185 			/*
   9186 			 * Not enough free descriptors to transmit this
   9187 			 * packet.  We haven't committed anything yet,
   9188 			 * so just unload the DMA map, put the packet
    9189 			 * back on the queue, and punt. Notify the upper
   9190 			 * layer that there are no more slots left.
   9191 			 */
   9192 			DPRINTF(sc, WM_DEBUG_TX,
   9193 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9194 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9195 				segs_needed, txq->txq_free - 1));
   9196 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9197 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9198 			WM_Q_EVCNT_INCR(txq, txdstall);
   9199 			break;
   9200 		}
   9201 
   9202 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9203 
   9204 		DPRINTF(sc, WM_DEBUG_TX,
   9205 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9206 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9207 
   9208 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9209 
   9210 		/*
   9211 		 * Store a pointer to the packet so that we can free it
   9212 		 * later.
   9213 		 *
   9214 		 * Initially, we consider the number of descriptors the
   9215 		 * packet uses the number of DMA segments.  This may be
   9216 		 * incremented by 1 if we do checksum offload (a descriptor
   9217 		 * is used to set the checksum context).
   9218 		 */
   9219 		txs->txs_mbuf = m0;
   9220 		txs->txs_firstdesc = txq->txq_next;
   9221 		txs->txs_ndesc = segs_needed;
   9222 
   9223 		/* Set up offload parameters for this packet. */
   9224 		uint32_t cmdlen, fields, dcmdlen;
   9225 		if (m0->m_pkthdr.csum_flags &
   9226 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9227 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9228 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9229 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9230 			    &do_csum);
   9231 		} else {
   9232 			do_csum = false;
   9233 			cmdlen = 0;
   9234 			fields = 0;
   9235 		}
   9236 
   9237 		/* Sync the DMA map. */
   9238 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9239 		    BUS_DMASYNC_PREWRITE);
   9240 
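         		/*
         		 * The offload result selects the descriptor format below:
         		 * when an offload context is in use (do_csum), the packet
         		 * must use advanced (NQ) data descriptors; otherwise a
         		 * legacy descriptor is used.
         		 */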
   9241 		/* Initialize the first transmit descriptor. */
   9242 		nexttx = txq->txq_next;
   9243 		if (!do_csum) {
   9244 			/* Set up a legacy descriptor */
   9245 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9246 			    dmamap->dm_segs[0].ds_addr);
   9247 			txq->txq_descs[nexttx].wtx_cmdlen =
   9248 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9249 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9250 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9251 			if (vlan_has_tag(m0)) {
   9252 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9253 				    htole32(WTX_CMD_VLE);
   9254 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9255 				    htole16(vlan_get_tag(m0));
   9256 			} else
    9257 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9258 
   9259 			dcmdlen = 0;
   9260 		} else {
   9261 			/* Set up an advanced data descriptor */
   9262 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9263 			    htole64(dmamap->dm_segs[0].ds_addr);
   9264 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9265 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9266 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9267 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9268 			    htole32(fields);
   9269 			DPRINTF(sc, WM_DEBUG_TX,
   9270 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9271 				device_xname(sc->sc_dev), nexttx,
   9272 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9273 			DPRINTF(sc, WM_DEBUG_TX,
   9274 			    ("\t 0x%08x%08x\n", fields,
   9275 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9276 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9277 		}
   9278 
   9279 		lasttx = nexttx;
   9280 		nexttx = WM_NEXTTX(txq, nexttx);
   9281 		/*
    9282 		 * Fill in the next descriptors. The legacy and advanced
    9283 		 * formats are identical for these fields.
   9284 		 */
   9285 		for (seg = 1; seg < dmamap->dm_nsegs;
   9286 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9287 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9288 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9289 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9290 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9291 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9292 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9293 			lasttx = nexttx;
   9294 
   9295 			DPRINTF(sc, WM_DEBUG_TX,
   9296 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9297 				device_xname(sc->sc_dev), nexttx,
   9298 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9299 				dmamap->dm_segs[seg].ds_len));
   9300 		}
   9301 
   9302 		KASSERT(lasttx != -1);
   9303 
   9304 		/*
   9305 		 * Set up the command byte on the last descriptor of
   9306 		 * the packet. If we're in the interrupt delay window,
   9307 		 * delay the interrupt.
   9308 		 */
   9309 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9310 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
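         		/*
         		 * The KASSERT above guarantees that the EOP and RS bits
         		 * occupy the same positions in the legacy and advanced
         		 * layouts, so the legacy view can set them on the last
         		 * descriptor regardless of the format in use.
         		 */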
   9311 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9312 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9313 
   9314 		txs->txs_lastdesc = lasttx;
   9315 
   9316 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9317 		    device_xname(sc->sc_dev),
   9318 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9319 
   9320 		/* Sync the descriptors we're using. */
   9321 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9322 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9323 
   9324 		/* Give the packet to the chip. */
   9325 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9326 		sent = true;
   9327 
   9328 		DPRINTF(sc, WM_DEBUG_TX,
   9329 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9330 
   9331 		DPRINTF(sc, WM_DEBUG_TX,
   9332 		    ("%s: TX: finished transmitting packet, job %d\n",
   9333 			device_xname(sc->sc_dev), txq->txq_snext));
   9334 
   9335 		/* Advance the tx pointer. */
   9336 		txq->txq_free -= txs->txs_ndesc;
   9337 		txq->txq_next = nexttx;
   9338 
   9339 		txq->txq_sfree--;
   9340 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9341 
   9342 		/* Pass the packet to any BPF listeners. */
   9343 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9344 	}
   9345 
   9346 	if (m0 != NULL) {
   9347 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9348 		WM_Q_EVCNT_INCR(txq, descdrop);
   9349 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9350 			__func__));
   9351 		m_freem(m0);
   9352 	}
   9353 
   9354 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9355 		/* No more slots; notify upper layer. */
   9356 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9357 	}
   9358 
   9359 	if (sent) {
   9360 		/* Set a watchdog timer in case the chip flakes out. */
   9361 		txq->txq_lastsent = time_uptime;
   9362 		txq->txq_sending = true;
   9363 	}
   9364 }
   9365 
   9366 static void
   9367 wm_deferred_start_locked(struct wm_txqueue *txq)
   9368 {
   9369 	struct wm_softc *sc = txq->txq_sc;
   9370 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9371 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9372 	int qid = wmq->wmq_id;
   9373 
   9374 	KASSERT(mutex_owned(txq->txq_lock));
   9375 	KASSERT(!txq->txq_stopping);
   9376 
   9377 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9378 		/* XXX needed for ALTQ or single-CPU systems */
   9379 		if (qid == 0)
   9380 			wm_nq_start_locked(ifp);
   9381 		wm_nq_transmit_locked(ifp, txq);
   9382 	} else {
    9383 		/* XXX needed for ALTQ or single-CPU systems */
   9384 		if (qid == 0)
   9385 			wm_start_locked(ifp);
   9386 		wm_transmit_locked(ifp, txq);
   9387 	}
   9388 }
   9389 
   9390 /* Interrupt */
   9391 
   9392 /*
   9393  * wm_txeof:
   9394  *
   9395  *	Helper; handle transmit interrupts.
   9396  */
   9397 static bool
   9398 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9399 {
   9400 	struct wm_softc *sc = txq->txq_sc;
   9401 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9402 	struct wm_txsoft *txs;
   9403 	int count = 0;
   9404 	int i;
   9405 	uint8_t status;
   9406 	bool more = false;
   9407 
   9408 	KASSERT(mutex_owned(txq->txq_lock));
   9409 
   9410 	if (txq->txq_stopping)
   9411 		return false;
   9412 
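         	/*
         	 * We are about to reclaim descriptors, so clear the no-space
         	 * condition; the transmit path sets it again if the ring is
         	 * still short of free descriptors.
         	 */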
   9413 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9414 
   9415 	/*
   9416 	 * Go through the Tx list and free mbufs for those
   9417 	 * frames which have been transmitted.
   9418 	 */
   9419 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9420 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9421 		txs = &txq->txq_soft[i];
   9422 
   9423 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9424 			device_xname(sc->sc_dev), i));
   9425 
   9426 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9427 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9428 
   9429 		status =
   9430 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9431 		if ((status & WTX_ST_DD) == 0) {
   9432 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9433 			    BUS_DMASYNC_PREREAD);
   9434 			break;
   9435 		}
   9436 
   9437 		if (limit-- == 0) {
   9438 			more = true;
   9439 			DPRINTF(sc, WM_DEBUG_TX,
   9440 			    ("%s: TX: loop limited, job %d is not processed\n",
   9441 				device_xname(sc->sc_dev), i));
   9442 			break;
   9443 		}
   9444 
   9445 		count++;
   9446 		DPRINTF(sc, WM_DEBUG_TX,
   9447 		    ("%s: TX: job %d done: descs %d..%d\n",
   9448 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9449 		    txs->txs_lastdesc));
   9450 
   9451 		/*
   9452 		 * XXX We should probably be using the statistics
   9453 		 * XXX registers, but I don't know if they exist
   9454 		 * XXX on chips before the i82544.
   9455 		 */
   9456 
   9457 #ifdef WM_EVENT_COUNTERS
   9458 		if (status & WTX_ST_TU)
   9459 			WM_Q_EVCNT_INCR(txq, underrun);
   9460 #endif /* WM_EVENT_COUNTERS */
   9461 
    9462 		/*
    9463 		 * Documents for the 82574 and newer say the status field has
    9464 		 * neither the EC (Excessive Collision) bit nor the LC (Late
    9465 		 * Collision) bit (both are reserved). Refer to the "PCIe GbE
    9466 		 * Controller Open Source Software Developer's Manual", the
    9467 		 * 82574 datasheet, and newer ones.
    9468 		 *
    9469 		 * XXX The LC bit was observed set on an I218 even though the
    9470 		 * media was full duplex, so the bit might have some other
    9471 		 * meaning (no documentation found).
    9472 		 */
   9472 
   9473 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9474 		    && ((sc->sc_type < WM_T_82574)
   9475 			|| (sc->sc_type == WM_T_80003))) {
   9476 			if_statinc(ifp, if_oerrors);
   9477 			if (status & WTX_ST_LC)
   9478 				log(LOG_WARNING, "%s: late collision\n",
   9479 				    device_xname(sc->sc_dev));
   9480 			else if (status & WTX_ST_EC) {
   9481 				if_statadd(ifp, if_collisions,
   9482 				    TX_COLLISION_THRESHOLD + 1);
   9483 				log(LOG_WARNING, "%s: excessive collisions\n",
   9484 				    device_xname(sc->sc_dev));
   9485 			}
   9486 		} else
   9487 			if_statinc(ifp, if_opackets);
   9488 
   9489 		txq->txq_packets++;
   9490 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9491 
   9492 		txq->txq_free += txs->txs_ndesc;
   9493 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9494 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9495 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9496 		m_freem(txs->txs_mbuf);
   9497 		txs->txs_mbuf = NULL;
   9498 	}
   9499 
   9500 	/* Update the dirty transmit buffer pointer. */
   9501 	txq->txq_sdirty = i;
   9502 	DPRINTF(sc, WM_DEBUG_TX,
   9503 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9504 
   9505 	if (count != 0)
   9506 		rnd_add_uint32(&sc->rnd_source, count);
   9507 
   9508 	/*
   9509 	 * If there are no more pending transmissions, cancel the watchdog
   9510 	 * timer.
   9511 	 */
   9512 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9513 		txq->txq_sending = false;
   9514 
   9515 	return more;
   9516 }
   9517 
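         /*
          * Receive descriptor accessors.
          *
          * Three receive descriptor layouts are handled: the legacy format
          * (wrx_*), the 82574 extended format (erx_*), and the advanced
          * ("newqueue") format (nqrx_*) used when WM_F_NEWQUEUE is set.
          * The inline helpers below hide the layout differences.
          */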
   9518 static inline uint32_t
   9519 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9520 {
   9521 	struct wm_softc *sc = rxq->rxq_sc;
   9522 
   9523 	if (sc->sc_type == WM_T_82574)
   9524 		return EXTRXC_STATUS(
   9525 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9526 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9527 		return NQRXC_STATUS(
   9528 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9529 	else
   9530 		return rxq->rxq_descs[idx].wrx_status;
   9531 }
   9532 
   9533 static inline uint32_t
   9534 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9535 {
   9536 	struct wm_softc *sc = rxq->rxq_sc;
   9537 
   9538 	if (sc->sc_type == WM_T_82574)
   9539 		return EXTRXC_ERROR(
   9540 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9541 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9542 		return NQRXC_ERROR(
   9543 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9544 	else
   9545 		return rxq->rxq_descs[idx].wrx_errors;
   9546 }
   9547 
   9548 static inline uint16_t
   9549 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9550 {
   9551 	struct wm_softc *sc = rxq->rxq_sc;
   9552 
   9553 	if (sc->sc_type == WM_T_82574)
   9554 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9555 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9556 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9557 	else
   9558 		return rxq->rxq_descs[idx].wrx_special;
   9559 }
   9560 
   9561 static inline int
   9562 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9563 {
   9564 	struct wm_softc *sc = rxq->rxq_sc;
   9565 
   9566 	if (sc->sc_type == WM_T_82574)
   9567 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9568 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9569 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9570 	else
   9571 		return rxq->rxq_descs[idx].wrx_len;
   9572 }
   9573 
   9574 #ifdef WM_DEBUG
   9575 static inline uint32_t
   9576 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9577 {
   9578 	struct wm_softc *sc = rxq->rxq_sc;
   9579 
   9580 	if (sc->sc_type == WM_T_82574)
   9581 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9582 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9583 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9584 	else
   9585 		return 0;
   9586 }
   9587 
   9588 static inline uint8_t
   9589 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9590 {
   9591 	struct wm_softc *sc = rxq->rxq_sc;
   9592 
   9593 	if (sc->sc_type == WM_T_82574)
   9594 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9595 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9596 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9597 	else
   9598 		return 0;
   9599 }
   9600 #endif /* WM_DEBUG */
   9601 
   9602 static inline bool
   9603 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9604     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9605 {
   9606 
   9607 	if (sc->sc_type == WM_T_82574)
   9608 		return (status & ext_bit) != 0;
   9609 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9610 		return (status & nq_bit) != 0;
   9611 	else
   9612 		return (status & legacy_bit) != 0;
   9613 }
   9614 
   9615 static inline bool
   9616 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9617     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9618 {
   9619 
   9620 	if (sc->sc_type == WM_T_82574)
   9621 		return (error & ext_bit) != 0;
   9622 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9623 		return (error & nq_bit) != 0;
   9624 	else
   9625 		return (error & legacy_bit) != 0;
   9626 }
   9627 
   9628 static inline bool
   9629 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9630 {
   9631 
   9632 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9633 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9634 		return true;
   9635 	else
   9636 		return false;
   9637 }
   9638 
   9639 static inline bool
   9640 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9641 {
   9642 	struct wm_softc *sc = rxq->rxq_sc;
   9643 
   9644 	/* XXX missing error bit for newqueue? */
   9645 	if (wm_rxdesc_is_set_error(sc, errors,
   9646 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9647 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9648 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9649 		NQRXC_ERROR_RXE)) {
   9650 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9651 		    EXTRXC_ERROR_SE, 0))
   9652 			log(LOG_WARNING, "%s: symbol error\n",
   9653 			    device_xname(sc->sc_dev));
   9654 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9655 		    EXTRXC_ERROR_SEQ, 0))
   9656 			log(LOG_WARNING, "%s: receive sequence error\n",
   9657 			    device_xname(sc->sc_dev));
   9658 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9659 		    EXTRXC_ERROR_CE, 0))
   9660 			log(LOG_WARNING, "%s: CRC error\n",
   9661 			    device_xname(sc->sc_dev));
   9662 		return true;
   9663 	}
   9664 
   9665 	return false;
   9666 }
   9667 
   9668 static inline bool
   9669 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9670 {
   9671 	struct wm_softc *sc = rxq->rxq_sc;
   9672 
   9673 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9674 		NQRXC_STATUS_DD)) {
   9675 		/* We have processed all of the receive descriptors. */
   9676 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9677 		return false;
   9678 	}
   9679 
   9680 	return true;
   9681 }
   9682 
   9683 static inline bool
   9684 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9685     uint16_t vlantag, struct mbuf *m)
   9686 {
   9687 
   9688 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9689 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9690 		vlan_set_tag(m, le16toh(vlantag));
   9691 	}
   9692 
   9693 	return true;
   9694 }
   9695 
   9696 static inline void
   9697 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9698     uint32_t errors, struct mbuf *m)
   9699 {
   9700 	struct wm_softc *sc = rxq->rxq_sc;
   9701 
   9702 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9703 		if (wm_rxdesc_is_set_status(sc, status,
   9704 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9705 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9706 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9707 			if (wm_rxdesc_is_set_error(sc, errors,
   9708 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9709 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9710 		}
   9711 		if (wm_rxdesc_is_set_status(sc, status,
   9712 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9713 			/*
   9714 			 * Note: we don't know if this was TCP or UDP,
   9715 			 * so we just set both bits, and expect the
   9716 			 * upper layers to deal.
   9717 			 */
   9718 			WM_Q_EVCNT_INCR(rxq, tusum);
   9719 			m->m_pkthdr.csum_flags |=
   9720 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9721 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9722 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9723 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9724 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9725 		}
   9726 	}
   9727 }
   9728 
   9729 /*
   9730  * wm_rxeof:
   9731  *
   9732  *	Helper; handle receive interrupts.
   9733  */
   9734 static bool
   9735 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9736 {
   9737 	struct wm_softc *sc = rxq->rxq_sc;
   9738 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9739 	struct wm_rxsoft *rxs;
   9740 	struct mbuf *m;
   9741 	int i, len;
   9742 	int count = 0;
   9743 	uint32_t status, errors;
   9744 	uint16_t vlantag;
   9745 	bool more = false;
   9746 
   9747 	KASSERT(mutex_owned(rxq->rxq_lock));
   9748 
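         	/*
         	 * Scan the ring from rxq_ptr, reassembling multi-descriptor
         	 * packets, until we reach a descriptor the hardware still owns
         	 * or we hit the processing limit.
         	 */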
   9749 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9750 		rxs = &rxq->rxq_soft[i];
   9751 
   9752 		DPRINTF(sc, WM_DEBUG_RX,
   9753 		    ("%s: RX: checking descriptor %d\n",
   9754 			device_xname(sc->sc_dev), i));
   9755 		wm_cdrxsync(rxq, i,
   9756 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9757 
   9758 		status = wm_rxdesc_get_status(rxq, i);
   9759 		errors = wm_rxdesc_get_errors(rxq, i);
   9760 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9761 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9762 #ifdef WM_DEBUG
   9763 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9764 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9765 #endif
   9766 
   9767 		if (!wm_rxdesc_dd(rxq, i, status))
   9768 			break;
   9769 
   9770 		if (limit-- == 0) {
   9771 			more = true;
   9772 			DPRINTF(sc, WM_DEBUG_RX,
   9773 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9774 				device_xname(sc->sc_dev), i));
   9775 			break;
   9776 		}
   9777 
   9778 		count++;
   9779 		if (__predict_false(rxq->rxq_discard)) {
   9780 			DPRINTF(sc, WM_DEBUG_RX,
   9781 			    ("%s: RX: discarding contents of descriptor %d\n",
   9782 				device_xname(sc->sc_dev), i));
   9783 			wm_init_rxdesc(rxq, i);
   9784 			if (wm_rxdesc_is_eop(rxq, status)) {
   9785 				/* Reset our state. */
   9786 				DPRINTF(sc, WM_DEBUG_RX,
   9787 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9788 					device_xname(sc->sc_dev)));
   9789 				rxq->rxq_discard = 0;
   9790 			}
   9791 			continue;
   9792 		}
   9793 
   9794 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9795 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9796 
   9797 		m = rxs->rxs_mbuf;
   9798 
   9799 		/*
   9800 		 * Add a new receive buffer to the ring, unless of
   9801 		 * course the length is zero. Treat the latter as a
   9802 		 * failed mapping.
   9803 		 */
   9804 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9805 			/*
   9806 			 * Failed, throw away what we've done so
   9807 			 * far, and discard the rest of the packet.
   9808 			 */
   9809 			if_statinc(ifp, if_ierrors);
   9810 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9811 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9812 			wm_init_rxdesc(rxq, i);
   9813 			if (!wm_rxdesc_is_eop(rxq, status))
   9814 				rxq->rxq_discard = 1;
   9815 			if (rxq->rxq_head != NULL)
   9816 				m_freem(rxq->rxq_head);
   9817 			WM_RXCHAIN_RESET(rxq);
   9818 			DPRINTF(sc, WM_DEBUG_RX,
   9819 			    ("%s: RX: Rx buffer allocation failed, "
   9820 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9821 				rxq->rxq_discard ? " (discard)" : ""));
   9822 			continue;
   9823 		}
   9824 
   9825 		m->m_len = len;
   9826 		rxq->rxq_len += len;
   9827 		DPRINTF(sc, WM_DEBUG_RX,
   9828 		    ("%s: RX: buffer at %p len %d\n",
   9829 			device_xname(sc->sc_dev), m->m_data, len));
   9830 
   9831 		/* If this is not the end of the packet, keep looking. */
   9832 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9833 			WM_RXCHAIN_LINK(rxq, m);
   9834 			DPRINTF(sc, WM_DEBUG_RX,
   9835 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9836 				device_xname(sc->sc_dev), rxq->rxq_len));
   9837 			continue;
   9838 		}
   9839 
    9840 		/*
    9841 		 * Okay, we have the entire packet now. The chip is
    9842 		 * configured to include the FCS (not all chips can be
    9843 		 * configured to strip it), so we normally need to trim it.
    9844 		 * The I35[04] and I21[01] are exceptions: due to an errata
    9845 		 * their RCTL_SECRC bit is always set, so the FCS is already
    9846 		 * stripped and we don't trim it. PCH2 and newer chips also
    9847 		 * exclude the FCS when a jumbo frame is used, to work around
    9848 		 * an errata. We may need to adjust the length of the
    9849 		 * previous mbuf in the chain if the current mbuf is too
    9850 		 * short.
    9851 		 */
   9851 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9852 			if (m->m_len < ETHER_CRC_LEN) {
   9853 				rxq->rxq_tail->m_len
   9854 				    -= (ETHER_CRC_LEN - m->m_len);
   9855 				m->m_len = 0;
   9856 			} else
   9857 				m->m_len -= ETHER_CRC_LEN;
   9858 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9859 		} else
   9860 			len = rxq->rxq_len;
   9861 
   9862 		WM_RXCHAIN_LINK(rxq, m);
   9863 
   9864 		*rxq->rxq_tailp = NULL;
   9865 		m = rxq->rxq_head;
   9866 
   9867 		WM_RXCHAIN_RESET(rxq);
   9868 
   9869 		DPRINTF(sc, WM_DEBUG_RX,
   9870 		    ("%s: RX: have entire packet, len -> %d\n",
   9871 			device_xname(sc->sc_dev), len));
   9872 
   9873 		/* If an error occurred, update stats and drop the packet. */
   9874 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9875 			m_freem(m);
   9876 			continue;
   9877 		}
   9878 
   9879 		/* No errors.  Receive the packet. */
   9880 		m_set_rcvif(m, ifp);
   9881 		m->m_pkthdr.len = len;
    9882 		/*
    9883 		 * TODO: the rsshash and rsstype should be saved in this
    9884 		 * mbuf.
    9885 		 */
   9886 		DPRINTF(sc, WM_DEBUG_RX,
   9887 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9888 			device_xname(sc->sc_dev), rsstype, rsshash));
   9889 
   9890 		/*
   9891 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9892 		 * for us.  Associate the tag with the packet.
   9893 		 */
   9894 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9895 			continue;
   9896 
   9897 		/* Set up checksum info for this packet. */
   9898 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9899 
   9900 		rxq->rxq_packets++;
   9901 		rxq->rxq_bytes += len;
   9902 		/* Pass it on. */
   9903 		if_percpuq_enqueue(sc->sc_ipq, m);
   9904 
   9905 		if (rxq->rxq_stopping)
   9906 			break;
   9907 	}
   9908 	rxq->rxq_ptr = i;
   9909 
   9910 	if (count != 0)
   9911 		rnd_add_uint32(&sc->rnd_source, count);
   9912 
   9913 	DPRINTF(sc, WM_DEBUG_RX,
   9914 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9915 
   9916 	return more;
   9917 }
   9918 
   9919 /*
   9920  * wm_linkintr_gmii:
   9921  *
   9922  *	Helper; handle link interrupts for GMII.
   9923  */
   9924 static void
   9925 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9926 {
   9927 	device_t dev = sc->sc_dev;
   9928 	uint32_t status, reg;
   9929 	bool link;
   9930 	int rv;
   9931 
   9932 	KASSERT(WM_CORE_LOCKED(sc));
   9933 
   9934 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9935 		__func__));
   9936 
   9937 	if ((icr & ICR_LSC) == 0) {
   9938 		if (icr & ICR_RXSEQ)
   9939 			DPRINTF(sc, WM_DEBUG_LINK,
   9940 			    ("%s: LINK Receive sequence error\n",
   9941 				device_xname(dev)));
   9942 		return;
   9943 	}
   9944 
   9945 	/* Link status changed */
   9946 	status = CSR_READ(sc, WMREG_STATUS);
   9947 	link = status & STATUS_LU;
   9948 	if (link) {
   9949 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9950 			device_xname(dev),
   9951 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9952 		if (wm_phy_need_linkdown_discard(sc)) {
   9953 			DPRINTF(sc, WM_DEBUG_LINK,
   9954 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9955 				device_xname(dev)));
   9956 			wm_clear_linkdown_discard(sc);
   9957 		}
   9958 	} else {
   9959 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9960 			device_xname(dev)));
   9961 		if (wm_phy_need_linkdown_discard(sc)) {
   9962 			DPRINTF(sc, WM_DEBUG_LINK,
   9963 			    ("%s: linkintr: Set linkdown discard flag\n",
   9964 				device_xname(dev)));
   9965 			wm_set_linkdown_discard(sc);
   9966 		}
   9967 	}
   9968 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9969 		wm_gig_downshift_workaround_ich8lan(sc);
   9970 
   9971 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   9972 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9973 
   9974 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9975 		device_xname(dev)));
   9976 	mii_pollstat(&sc->sc_mii);
   9977 	if (sc->sc_type == WM_T_82543) {
   9978 		int miistatus, active;
   9979 
    9980 		/*
    9981 		 * With the 82543, we need to force the MAC's speed
    9982 		 * and duplex to match the PHY's speed and duplex
    9983 		 * configuration.
    9984 		 */
   9985 		miistatus = sc->sc_mii.mii_media_status;
   9986 
   9987 		if (miistatus & IFM_ACTIVE) {
   9988 			active = sc->sc_mii.mii_media_active;
   9989 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9990 			switch (IFM_SUBTYPE(active)) {
   9991 			case IFM_10_T:
   9992 				sc->sc_ctrl |= CTRL_SPEED_10;
   9993 				break;
   9994 			case IFM_100_TX:
   9995 				sc->sc_ctrl |= CTRL_SPEED_100;
   9996 				break;
   9997 			case IFM_1000_T:
   9998 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9999 				break;
   10000 			default:
   10001 				/*
   10002 				 * Fiber?
    10003 				 * Should not enter here.
   10004 				 */
   10005 				device_printf(dev, "unknown media (%x)\n",
   10006 				    active);
   10007 				break;
   10008 			}
   10009 			if (active & IFM_FDX)
   10010 				sc->sc_ctrl |= CTRL_FD;
   10011 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10012 		}
   10013 	} else if (sc->sc_type == WM_T_PCH) {
   10014 		wm_k1_gig_workaround_hv(sc,
   10015 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10016 	}
   10017 
   10018 	/*
   10019 	 * When connected at 10Mbps half-duplex, some parts are excessively
   10020 	 * aggressive resulting in many collisions. To avoid this, increase
   10021 	 * the IPG and reduce Rx latency in the PHY.
   10022 	 */
   10023 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   10024 	    && link) {
   10025 		uint32_t tipg_reg;
   10026 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10027 		bool fdx;
   10028 		uint16_t emi_addr, emi_val;
   10029 
   10030 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10031 		tipg_reg &= ~TIPG_IPGT_MASK;
   10032 		fdx = status & STATUS_FD;
   10033 
   10034 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10035 			tipg_reg |= 0xff;
   10036 			/* Reduce Rx latency in analog PHY */
   10037 			emi_val = 0;
   10038 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10039 		    fdx && speed != STATUS_SPEED_1000) {
   10040 			tipg_reg |= 0xc;
   10041 			emi_val = 1;
   10042 		} else {
    10043 			/* Roll back to the default values */
   10044 			tipg_reg |= 0x08;
   10045 			emi_val = 1;
   10046 		}
   10047 
   10048 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10049 
   10050 		rv = sc->phy.acquire(sc);
   10051 		if (rv)
   10052 			return;
   10053 
   10054 		if (sc->sc_type == WM_T_PCH2)
   10055 			emi_addr = I82579_RX_CONFIG;
   10056 		else
   10057 			emi_addr = I217_RX_CONFIG;
   10058 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10059 
   10060 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10061 			uint16_t phy_reg;
   10062 
   10063 			sc->phy.readreg_locked(dev, 2,
   10064 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10065 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10066 			if (speed == STATUS_SPEED_100
   10067 			    || speed == STATUS_SPEED_10)
   10068 				phy_reg |= 0x3e8;
   10069 			else
   10070 				phy_reg |= 0xfa;
   10071 			sc->phy.writereg_locked(dev, 2,
   10072 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10073 
   10074 			if (speed == STATUS_SPEED_1000) {
   10075 				sc->phy.readreg_locked(dev, 2,
   10076 				    HV_PM_CTRL, &phy_reg);
   10077 
   10078 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10079 
   10080 				sc->phy.writereg_locked(dev, 2,
   10081 				    HV_PM_CTRL, phy_reg);
   10082 			}
   10083 		}
   10084 		sc->phy.release(sc);
   10085 
   10086 		if (rv)
   10087 			return;
   10088 
   10089 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10090 			uint16_t data, ptr_gap;
   10091 
   10092 			if (speed == STATUS_SPEED_1000) {
   10093 				rv = sc->phy.acquire(sc);
   10094 				if (rv)
   10095 					return;
   10096 
   10097 				rv = sc->phy.readreg_locked(dev, 2,
   10098 				    I82579_UNKNOWN1, &data);
   10099 				if (rv) {
   10100 					sc->phy.release(sc);
   10101 					return;
   10102 				}
   10103 
   10104 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10105 				if (ptr_gap < 0x18) {
   10106 					data &= ~(0x3ff << 2);
   10107 					data |= (0x18 << 2);
   10108 					rv = sc->phy.writereg_locked(dev,
   10109 					    2, I82579_UNKNOWN1, data);
   10110 				}
   10111 				sc->phy.release(sc);
   10112 				if (rv)
   10113 					return;
   10114 			} else {
   10115 				rv = sc->phy.acquire(sc);
   10116 				if (rv)
   10117 					return;
   10118 
   10119 				rv = sc->phy.writereg_locked(dev, 2,
   10120 				    I82579_UNKNOWN1, 0xc023);
   10121 				sc->phy.release(sc);
   10122 				if (rv)
   10123 					return;
   10124 
   10125 			}
   10126 		}
   10127 	}
   10128 
   10129 	/*
   10130 	 * I217 Packet Loss issue:
   10131 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   10132 	 * on power up.
   10133 	 * Set the Beacon Duration for I217 to 8 usec
   10134 	 */
   10135 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10136 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10137 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10138 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10139 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10140 	}
   10141 
   10142 	/* Work-around I218 hang issue */
   10143 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10144 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10145 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10146 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10147 		wm_k1_workaround_lpt_lp(sc, link);
   10148 
   10149 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10150 		/*
   10151 		 * Set platform power management values for Latency
   10152 		 * Tolerance Reporting (LTR)
   10153 		 */
   10154 		wm_platform_pm_pch_lpt(sc,
   10155 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10156 	}
   10157 
   10158 	/* Clear link partner's EEE ability */
   10159 	sc->eee_lp_ability = 0;
   10160 
   10161 	/* FEXTNVM6 K1-off workaround */
   10162 	if (sc->sc_type == WM_T_PCH_SPT) {
   10163 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10164 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10165 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10166 		else
   10167 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10168 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10169 	}
   10170 
   10171 	if (!link)
   10172 		return;
   10173 
   10174 	switch (sc->sc_type) {
   10175 	case WM_T_PCH2:
   10176 		wm_k1_workaround_lv(sc);
   10177 		/* FALLTHROUGH */
   10178 	case WM_T_PCH:
   10179 		if (sc->sc_phytype == WMPHY_82578)
   10180 			wm_link_stall_workaround_hv(sc);
   10181 		break;
   10182 	default:
   10183 		break;
   10184 	}
   10185 
   10186 	/* Enable/Disable EEE after link up */
   10187 	if (sc->sc_phytype > WMPHY_82579)
   10188 		wm_set_eee_pchlan(sc);
   10189 }
   10190 
   10191 /*
   10192  * wm_linkintr_tbi:
   10193  *
   10194  *	Helper; handle link interrupts for TBI mode.
   10195  */
   10196 static void
   10197 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10198 {
   10199 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10200 	uint32_t status;
   10201 
   10202 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10203 		__func__));
   10204 
   10205 	status = CSR_READ(sc, WMREG_STATUS);
   10206 	if (icr & ICR_LSC) {
   10207 		wm_check_for_link(sc);
   10208 		if (status & STATUS_LU) {
   10209 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10210 				device_xname(sc->sc_dev),
   10211 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10212 			/*
   10213 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10214 			 * so we should update sc->sc_ctrl
   10215 			 */
   10216 
   10217 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10218 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10219 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10220 			if (status & STATUS_FD)
   10221 				sc->sc_tctl |=
   10222 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10223 			else
   10224 				sc->sc_tctl |=
   10225 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10226 			if (sc->sc_ctrl & CTRL_TFCE)
   10227 				sc->sc_fcrtl |= FCRTL_XONE;
   10228 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10229 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10230 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10231 			sc->sc_tbi_linkup = 1;
   10232 			if_link_state_change(ifp, LINK_STATE_UP);
   10233 		} else {
   10234 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10235 				device_xname(sc->sc_dev)));
   10236 			sc->sc_tbi_linkup = 0;
   10237 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10238 		}
   10239 		/* Update LED */
   10240 		wm_tbi_serdes_set_linkled(sc);
   10241 	} else if (icr & ICR_RXSEQ)
   10242 		DPRINTF(sc, WM_DEBUG_LINK,
   10243 		    ("%s: LINK: Receive sequence error\n",
   10244 			device_xname(sc->sc_dev)));
   10245 }
   10246 
   10247 /*
   10248  * wm_linkintr_serdes:
   10249  *
    10250  *	Helper; handle link interrupts for SERDES mode.
   10251  */
   10252 static void
   10253 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10254 {
   10255 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10256 	struct mii_data *mii = &sc->sc_mii;
   10257 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10258 	uint32_t pcs_adv, pcs_lpab, reg;
   10259 
   10260 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10261 		__func__));
   10262 
   10263 	if (icr & ICR_LSC) {
   10264 		/* Check PCS */
   10265 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10266 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10267 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10268 				device_xname(sc->sc_dev)));
   10269 			mii->mii_media_status |= IFM_ACTIVE;
   10270 			sc->sc_tbi_linkup = 1;
   10271 			if_link_state_change(ifp, LINK_STATE_UP);
   10272 		} else {
   10273 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10274 				device_xname(sc->sc_dev)));
   10275 			mii->mii_media_status |= IFM_NONE;
   10276 			sc->sc_tbi_linkup = 0;
   10277 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10278 			wm_tbi_serdes_set_linkled(sc);
   10279 			return;
   10280 		}
   10281 		mii->mii_media_active |= IFM_1000_SX;
   10282 		if ((reg & PCS_LSTS_FDX) != 0)
   10283 			mii->mii_media_active |= IFM_FDX;
   10284 		else
   10285 			mii->mii_media_active |= IFM_HDX;
   10286 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10287 			/* Check flow */
   10288 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10289 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10290 				DPRINTF(sc, WM_DEBUG_LINK,
   10291 				    ("XXX LINKOK but not ACOMP\n"));
   10292 				return;
   10293 			}
   10294 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10295 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10296 			DPRINTF(sc, WM_DEBUG_LINK,
   10297 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
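          			/*
          			 * Standard IEEE 802.3 pause resolution: if both
          			 * ends advertise symmetric pause, enable flow
          			 * control in both directions; otherwise the
          			 * asymmetric-pause combinations below select
          			 * Tx-only or Rx-only pause.
          			 */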
   10298 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10299 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10300 				mii->mii_media_active |= IFM_FLOW
   10301 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10302 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10303 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10304 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10305 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10306 				mii->mii_media_active |= IFM_FLOW
   10307 				    | IFM_ETH_TXPAUSE;
   10308 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10309 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10310 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10311 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10312 				mii->mii_media_active |= IFM_FLOW
   10313 				    | IFM_ETH_RXPAUSE;
   10314 		}
   10315 		/* Update LED */
   10316 		wm_tbi_serdes_set_linkled(sc);
   10317 	} else
   10318 		DPRINTF(sc, WM_DEBUG_LINK,
   10319 		    ("%s: LINK: Receive sequence error\n",
   10320 		    device_xname(sc->sc_dev)));
   10321 }
   10322 
   10323 /*
   10324  * wm_linkintr:
   10325  *
   10326  *	Helper; handle link interrupts.
   10327  */
   10328 static void
   10329 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10330 {
   10331 
   10332 	KASSERT(WM_CORE_LOCKED(sc));
   10333 
   10334 	if (sc->sc_flags & WM_F_HAS_MII)
   10335 		wm_linkintr_gmii(sc, icr);
   10336 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10337 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10338 		wm_linkintr_serdes(sc, icr);
   10339 	else
   10340 		wm_linkintr_tbi(sc, icr);
   10341 }
   10342 
   10343 
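          /*
           * Schedule the deferred Tx/Rx work for a queue, either on the
           * per-device workqueue or as a softint, as selected by
           * wmq_txrx_use_workqueue.
           */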
   10344 static inline void
   10345 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10346 {
   10347 
   10348 	if (wmq->wmq_txrx_use_workqueue)
   10349 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   10350 	else
   10351 		softint_schedule(wmq->wmq_si);
   10352 }
   10353 
   10354 static inline void
   10355 wm_legacy_intr_disable(struct wm_softc *sc)
   10356 {
   10357 
   10358 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10359 }
   10360 
   10361 static inline void
   10362 wm_legacy_intr_enable(struct wm_softc *sc)
   10363 {
   10364 
   10365 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10366 }
   10367 
   10368 /*
   10369  * wm_intr_legacy:
   10370  *
   10371  *	Interrupt service routine for INTx and MSI.
   10372  */
   10373 static int
   10374 wm_intr_legacy(void *arg)
   10375 {
   10376 	struct wm_softc *sc = arg;
   10377 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10378 	struct wm_queue *wmq = &sc->sc_queue[0];
   10379 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10380 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10381 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10382 	u_int rxlimit = sc->sc_rx_intr_process_limit;
    10383 	uint32_t icr, rndval;
   10384 	bool more = false;
   10385 
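          	/*
          	 * Reading ICR acknowledges the asserted interrupt causes
          	 * (ICR is clear-on-read in INTx/MSI mode on these devices).
          	 * If none of the causes we enabled is set, the interrupt is
          	 * not ours.
          	 */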
   10386 	icr = CSR_READ(sc, WMREG_ICR);
   10387 	if ((icr & sc->sc_icr) == 0)
   10388 		return 0;
   10389 
   10390 	DPRINTF(sc, WM_DEBUG_TX,
    10391 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
    10392 	rndval = icr;
   10394 
   10395 	mutex_enter(txq->txq_lock);
   10396 
   10397 	if (txq->txq_stopping) {
   10398 		mutex_exit(txq->txq_lock);
   10399 		return 1;
   10400 	}
   10401 
   10402 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10403 	if (icr & ICR_TXDW) {
   10404 		DPRINTF(sc, WM_DEBUG_TX,
   10405 		    ("%s: TX: got TXDW interrupt\n",
   10406 			device_xname(sc->sc_dev)));
   10407 		WM_Q_EVCNT_INCR(txq, txdw);
   10408 	}
   10409 #endif
   10410 	if (txlimit > 0) {
   10411 		more |= wm_txeof(txq, txlimit);
   10412 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10413 			more = true;
   10414 	} else
   10415 		more = true;
   10416 	mutex_exit(txq->txq_lock);
   10417 
   10418 	mutex_enter(rxq->rxq_lock);
   10419 
   10420 	if (rxq->rxq_stopping) {
   10421 		mutex_exit(rxq->rxq_lock);
   10422 		return 1;
   10423 	}
   10424 
   10425 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10426 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10427 		DPRINTF(sc, WM_DEBUG_RX,
   10428 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10429 			device_xname(sc->sc_dev),
   10430 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10431 		WM_Q_EVCNT_INCR(rxq, intr);
   10432 	}
   10433 #endif
   10434 	if (rxlimit > 0) {
    10435 		/*
    10436 		 * wm_rxeof() does *not* call upper layer functions directly;
    10437 		 * if_percpuq_enqueue() just calls softint_schedule().
    10438 		 * So, we can call wm_rxeof() in interrupt context.
    10439 		 */
   10440 		more = wm_rxeof(rxq, rxlimit);
   10441 	} else
   10442 		more = true;
   10443 
   10444 	mutex_exit(rxq->rxq_lock);
   10445 
   10446 	WM_CORE_LOCK(sc);
   10447 
   10448 	if (sc->sc_core_stopping) {
   10449 		WM_CORE_UNLOCK(sc);
   10450 		return 1;
   10451 	}
   10452 
   10453 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10454 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10455 		wm_linkintr(sc, icr);
   10456 	}
   10457 	if ((icr & ICR_GPI(0)) != 0)
   10458 		device_printf(sc->sc_dev, "got module interrupt\n");
   10459 
   10460 	WM_CORE_UNLOCK(sc);
   10461 
   10462 	if (icr & ICR_RXO) {
   10463 #if defined(WM_DEBUG)
   10464 		log(LOG_WARNING, "%s: Receive overrun\n",
   10465 		    device_xname(sc->sc_dev));
   10466 #endif /* defined(WM_DEBUG) */
   10467 	}
   10468 
   10469 	rnd_add_uint32(&sc->rnd_source, rndval);
   10470 
   10471 	if (more) {
   10472 		/* Try to get more packets going. */
   10473 		wm_legacy_intr_disable(sc);
   10474 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10475 		wm_sched_handle_queue(sc, wmq);
   10476 	}
   10477 
   10478 	return 1;
   10479 }
   10480 
   10481 static inline void
   10482 wm_txrxintr_disable(struct wm_queue *wmq)
   10483 {
   10484 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10485 
   10486 	if (__predict_false(!wm_is_using_msix(sc))) {
   10487 		wm_legacy_intr_disable(sc);
   10488 		return;
   10489 	}
   10490 
   10491 	if (sc->sc_type == WM_T_82574)
   10492 		CSR_WRITE(sc, WMREG_IMC,
   10493 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10494 	else if (sc->sc_type == WM_T_82575)
   10495 		CSR_WRITE(sc, WMREG_EIMC,
   10496 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10497 	else
   10498 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10499 }
   10500 
   10501 static inline void
   10502 wm_txrxintr_enable(struct wm_queue *wmq)
   10503 {
   10504 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10505 
   10506 	wm_itrs_calculate(sc, wmq);
   10507 
   10508 	if (__predict_false(!wm_is_using_msix(sc))) {
   10509 		wm_legacy_intr_enable(sc);
   10510 		return;
   10511 	}
   10512 
    10513 	/*
    10514 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    10515 	 * here. There is no need to care which of RXQ(0) and RXQ(1) enables
    10516 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
    10517 	 * its wm_handle_queue(wmq) is running.
    10518 	 */
   10519 	if (sc->sc_type == WM_T_82574)
   10520 		CSR_WRITE(sc, WMREG_IMS,
   10521 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10522 	else if (sc->sc_type == WM_T_82575)
   10523 		CSR_WRITE(sc, WMREG_EIMS,
   10524 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10525 	else
   10526 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10527 }
   10528 
   10529 static int
   10530 wm_txrxintr_msix(void *arg)
   10531 {
   10532 	struct wm_queue *wmq = arg;
   10533 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10534 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10535 	struct wm_softc *sc = txq->txq_sc;
   10536 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10537 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10538 	bool txmore;
   10539 	bool rxmore;
   10540 
   10541 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10542 
   10543 	DPRINTF(sc, WM_DEBUG_TX,
   10544 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10545 
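          	/*
          	 * Disable this queue's Tx/Rx interrupts while we process the
          	 * rings; they are re-enabled by wm_txrxintr_enable() below or,
          	 * if work remains, at the end of the deferred pass in
          	 * wm_handle_queue().
          	 */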
   10546 	wm_txrxintr_disable(wmq);
   10547 
   10548 	mutex_enter(txq->txq_lock);
   10549 
   10550 	if (txq->txq_stopping) {
   10551 		mutex_exit(txq->txq_lock);
   10552 		return 1;
   10553 	}
   10554 
   10555 	WM_Q_EVCNT_INCR(txq, txdw);
   10556 	if (txlimit > 0) {
   10557 		txmore = wm_txeof(txq, txlimit);
    10558 		/* Deferred start is done in wm_handle_queue(). */
   10559 	} else
   10560 		txmore = true;
   10561 	mutex_exit(txq->txq_lock);
   10562 
   10563 	DPRINTF(sc, WM_DEBUG_RX,
   10564 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10565 	mutex_enter(rxq->rxq_lock);
   10566 
   10567 	if (rxq->rxq_stopping) {
   10568 		mutex_exit(rxq->rxq_lock);
   10569 		return 1;
   10570 	}
   10571 
   10572 	WM_Q_EVCNT_INCR(rxq, intr);
   10573 	if (rxlimit > 0) {
   10574 		rxmore = wm_rxeof(rxq, rxlimit);
   10575 	} else
   10576 		rxmore = true;
   10577 	mutex_exit(rxq->rxq_lock);
   10578 
   10579 	wm_itrs_writereg(sc, wmq);
   10580 
   10581 	if (txmore || rxmore) {
   10582 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10583 		wm_sched_handle_queue(sc, wmq);
   10584 	} else
   10585 		wm_txrxintr_enable(wmq);
   10586 
   10587 	return 1;
   10588 }
   10589 
   10590 static void
   10591 wm_handle_queue(void *arg)
   10592 {
   10593 	struct wm_queue *wmq = arg;
   10594 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10595 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10596 	struct wm_softc *sc = txq->txq_sc;
   10597 	u_int txlimit = sc->sc_tx_process_limit;
   10598 	u_int rxlimit = sc->sc_rx_process_limit;
   10599 	bool txmore;
   10600 	bool rxmore;
   10601 
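          	/*
          	 * Deferred (softint/workqueue) pass: note that this path uses
          	 * sc_{tx,rx}_process_limit rather than the interrupt-time
          	 * sc_{tx,rx}_intr_process_limit used by the ISRs.
          	 */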
   10602 	mutex_enter(txq->txq_lock);
   10603 	if (txq->txq_stopping) {
   10604 		mutex_exit(txq->txq_lock);
   10605 		return;
   10606 	}
   10607 	txmore = wm_txeof(txq, txlimit);
   10608 	wm_deferred_start_locked(txq);
   10609 	mutex_exit(txq->txq_lock);
   10610 
   10611 	mutex_enter(rxq->rxq_lock);
   10612 	if (rxq->rxq_stopping) {
   10613 		mutex_exit(rxq->rxq_lock);
   10614 		return;
   10615 	}
   10616 	WM_Q_EVCNT_INCR(rxq, defer);
   10617 	rxmore = wm_rxeof(rxq, rxlimit);
   10618 	mutex_exit(rxq->rxq_lock);
   10619 
   10620 	if (txmore || rxmore) {
   10621 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10622 		wm_sched_handle_queue(sc, wmq);
   10623 	} else
   10624 		wm_txrxintr_enable(wmq);
   10625 }
   10626 
   10627 static void
   10628 wm_handle_queue_work(struct work *wk, void *context)
   10629 {
   10630 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10631 
    10632 	/*
    10633 	 * An "enqueued" flag is not required here: the queue's interrupt
    10634 	 * stays disabled while the work is pending, so this work item
    10635 	 * cannot be enqueued twice.
    10636 	 */
   10635 	wm_handle_queue(wmq);
   10636 }
   10637 
   10638 /*
   10639  * wm_linkintr_msix:
   10640  *
   10641  *	Interrupt service routine for link status change for MSI-X.
   10642  */
   10643 static int
   10644 wm_linkintr_msix(void *arg)
   10645 {
   10646 	struct wm_softc *sc = arg;
   10647 	uint32_t reg;
   10648 	bool has_rxo;
   10649 
   10650 	reg = CSR_READ(sc, WMREG_ICR);
   10651 	WM_CORE_LOCK(sc);
   10652 	DPRINTF(sc, WM_DEBUG_LINK,
   10653 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10654 		device_xname(sc->sc_dev), reg));
   10655 
   10656 	if (sc->sc_core_stopping)
   10657 		goto out;
   10658 
   10659 	if ((reg & ICR_LSC) != 0) {
   10660 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10661 		wm_linkintr(sc, ICR_LSC);
   10662 	}
   10663 	if ((reg & ICR_GPI(0)) != 0)
   10664 		device_printf(sc->sc_dev, "got module interrupt\n");
   10665 
    10666 	/*
    10667 	 * XXX 82574 MSI-X mode workaround.
    10668 	 *
    10669 	 * In 82574 MSI-X mode, a receive overrun (RXO) interrupt is raised
    10670 	 * on the ICR_OTHER MSI-X vector, and on neither the ICR_RXQ(0) nor
    10671 	 * the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
    10672 	 * interrupts by writing WMREG_ICS to process received packets.
    10673 	 */
   10674 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10675 #if defined(WM_DEBUG)
   10676 		log(LOG_WARNING, "%s: Receive overrun\n",
   10677 		    device_xname(sc->sc_dev));
   10678 #endif /* defined(WM_DEBUG) */
   10679 
   10680 		has_rxo = true;
    10681 		/*
    10682 		 * The RXO interrupt rate is very high when the receive
    10683 		 * traffic rate is high. Use polling mode for ICR_OTHER as
    10684 		 * for the Tx/Rx interrupts: ICR_OTHER is re-enabled at the
    10685 		 * end of wm_txrxintr_msix(), which is kicked by both the
    10686 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    10687 		 */
   10688 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10689 
   10690 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10691 	}
    10692 
   10695 out:
   10696 	WM_CORE_UNLOCK(sc);
   10697 
   10698 	if (sc->sc_type == WM_T_82574) {
   10699 		if (!has_rxo)
   10700 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10701 		else
   10702 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10703 	} else if (sc->sc_type == WM_T_82575)
   10704 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10705 	else
   10706 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10707 
   10708 	return 1;
   10709 }
   10710 
   10711 /*
   10712  * Media related.
   10713  * GMII, SGMII, TBI (and SERDES)
   10714  */
   10715 
   10716 /* Common */
   10717 
   10718 /*
   10719  * wm_tbi_serdes_set_linkled:
   10720  *
   10721  *	Update the link LED on TBI and SERDES devices.
   10722  */
   10723 static void
   10724 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10725 {
   10726 
   10727 	if (sc->sc_tbi_linkup)
   10728 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10729 	else
   10730 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10731 
   10732 	/* 82540 or newer devices are active low */
   10733 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10734 
   10735 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10736 }
   10737 
   10738 /* GMII related */
   10739 
   10740 /*
   10741  * wm_gmii_reset:
   10742  *
   10743  *	Reset the PHY.
   10744  */
   10745 static void
   10746 wm_gmii_reset(struct wm_softc *sc)
   10747 {
   10748 	uint32_t reg;
   10749 	int rv;
   10750 
   10751 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10752 		device_xname(sc->sc_dev), __func__));
   10753 
   10754 	rv = sc->phy.acquire(sc);
   10755 	if (rv != 0) {
   10756 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10757 		    __func__);
   10758 		return;
   10759 	}
   10760 
   10761 	switch (sc->sc_type) {
   10762 	case WM_T_82542_2_0:
   10763 	case WM_T_82542_2_1:
   10764 		/* null */
   10765 		break;
   10766 	case WM_T_82543:
   10767 		/*
    10768 		 * With the 82543, we need to force the MAC's speed and duplex
    10769 		 * to match the PHY's speed and duplex configuration.
   10770 		 * In addition, we need to perform a hardware reset on the PHY
   10771 		 * to take it out of reset.
   10772 		 */
   10773 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10774 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10775 
   10776 		/* The PHY reset pin is active-low. */
   10777 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10778 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10779 		    CTRL_EXT_SWDPIN(4));
   10780 		reg |= CTRL_EXT_SWDPIO(4);
   10781 
   10782 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10783 		CSR_WRITE_FLUSH(sc);
   10784 		delay(10*1000);
   10785 
   10786 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10787 		CSR_WRITE_FLUSH(sc);
   10788 		delay(150);
   10789 #if 0
   10790 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10791 #endif
   10792 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10793 		break;
   10794 	case WM_T_82544:	/* Reset 10000us */
   10795 	case WM_T_82540:
   10796 	case WM_T_82545:
   10797 	case WM_T_82545_3:
   10798 	case WM_T_82546:
   10799 	case WM_T_82546_3:
   10800 	case WM_T_82541:
   10801 	case WM_T_82541_2:
   10802 	case WM_T_82547:
   10803 	case WM_T_82547_2:
   10804 	case WM_T_82571:	/* Reset 100us */
   10805 	case WM_T_82572:
   10806 	case WM_T_82573:
   10807 	case WM_T_82574:
   10808 	case WM_T_82575:
   10809 	case WM_T_82576:
   10810 	case WM_T_82580:
   10811 	case WM_T_I350:
   10812 	case WM_T_I354:
   10813 	case WM_T_I210:
   10814 	case WM_T_I211:
   10815 	case WM_T_82583:
   10816 	case WM_T_80003:
   10817 		/* Generic reset */
   10818 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10819 		CSR_WRITE_FLUSH(sc);
   10820 		delay(20000);
   10821 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10822 		CSR_WRITE_FLUSH(sc);
   10823 		delay(20000);
   10824 
   10825 		if ((sc->sc_type == WM_T_82541)
   10826 		    || (sc->sc_type == WM_T_82541_2)
   10827 		    || (sc->sc_type == WM_T_82547)
   10828 		    || (sc->sc_type == WM_T_82547_2)) {
    10829 			/* Workarounds for IGP are done in igp_reset() */
   10830 			/* XXX add code to set LED after phy reset */
   10831 		}
   10832 		break;
   10833 	case WM_T_ICH8:
   10834 	case WM_T_ICH9:
   10835 	case WM_T_ICH10:
   10836 	case WM_T_PCH:
   10837 	case WM_T_PCH2:
   10838 	case WM_T_PCH_LPT:
   10839 	case WM_T_PCH_SPT:
   10840 	case WM_T_PCH_CNP:
   10841 		/* Generic reset */
   10842 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10843 		CSR_WRITE_FLUSH(sc);
   10844 		delay(100);
   10845 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10846 		CSR_WRITE_FLUSH(sc);
   10847 		delay(150);
   10848 		break;
   10849 	default:
   10850 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10851 		    __func__);
   10852 		break;
   10853 	}
   10854 
   10855 	sc->phy.release(sc);
   10856 
   10857 	/* get_cfg_done */
   10858 	wm_get_cfg_done(sc);
   10859 
   10860 	/* Extra setup */
   10861 	switch (sc->sc_type) {
   10862 	case WM_T_82542_2_0:
   10863 	case WM_T_82542_2_1:
   10864 	case WM_T_82543:
   10865 	case WM_T_82544:
   10866 	case WM_T_82540:
   10867 	case WM_T_82545:
   10868 	case WM_T_82545_3:
   10869 	case WM_T_82546:
   10870 	case WM_T_82546_3:
   10871 	case WM_T_82541_2:
   10872 	case WM_T_82547_2:
   10873 	case WM_T_82571:
   10874 	case WM_T_82572:
   10875 	case WM_T_82573:
   10876 	case WM_T_82574:
   10877 	case WM_T_82583:
   10878 	case WM_T_82575:
   10879 	case WM_T_82576:
   10880 	case WM_T_82580:
   10881 	case WM_T_I350:
   10882 	case WM_T_I354:
   10883 	case WM_T_I210:
   10884 	case WM_T_I211:
   10885 	case WM_T_80003:
   10886 		/* Null */
   10887 		break;
   10888 	case WM_T_82541:
   10889 	case WM_T_82547:
    10890 		/* XXX Actively configure the LED after PHY reset */
   10891 		break;
   10892 	case WM_T_ICH8:
   10893 	case WM_T_ICH9:
   10894 	case WM_T_ICH10:
   10895 	case WM_T_PCH:
   10896 	case WM_T_PCH2:
   10897 	case WM_T_PCH_LPT:
   10898 	case WM_T_PCH_SPT:
   10899 	case WM_T_PCH_CNP:
   10900 		wm_phy_post_reset(sc);
   10901 		break;
   10902 	default:
   10903 		panic("%s: unknown type\n", __func__);
   10904 		break;
   10905 	}
   10906 }
   10907 
   10908 /*
   10909  * Set up sc_phytype and mii_{read|write}reg.
   10910  *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and to select them, the PCI ID or MAC type is needed without
 * accessing the PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet.
 * Check the PCI ID or MAC type. The list of PCI IDs may not be complete,
 * so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. This might still not be perfect because of missing entries
 * in the comparison tables, but it should be better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
   10925  */
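/*
 * Illustrative call flow: the function is first called with OUI and
 * model both zero, before any PHY register can be read, and again after
 * mii_attach() with the values read from the attached PHY's ID registers
 * (see wm_gmii_mediainit() below):
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);		(1st call: guess)
 *	mii_attach(...);
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
 *	    child->mii_mpd_model);			(2nd call: verify)
 */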
   10926 static void
   10927 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10928     uint16_t phy_model)
   10929 {
   10930 	device_t dev = sc->sc_dev;
   10931 	struct mii_data *mii = &sc->sc_mii;
   10932 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10933 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10934 	mii_readreg_t new_readreg;
   10935 	mii_writereg_t new_writereg;
   10936 	bool dodiag = true;
   10937 
   10938 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10939 		device_xname(sc->sc_dev), __func__));
   10940 
   10941 	/*
	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
	 * incorrect, so don't print diagnostic output on the second call.
   10944 	 */
   10945 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10946 		dodiag = false;
   10947 
   10948 	if (mii->mii_readreg == NULL) {
   10949 		/*
   10950 		 *  This is the first call of this function. For ICH and PCH
   10951 		 * variants, it's difficult to determine the PHY access method
   10952 		 * by sc_type, so use the PCI product ID for some devices.
   10953 		 */
   10954 
   10955 		switch (sc->sc_pcidevid) {
   10956 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10957 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10958 			/* 82577 */
   10959 			new_phytype = WMPHY_82577;
   10960 			break;
   10961 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10962 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10963 			/* 82578 */
   10964 			new_phytype = WMPHY_82578;
   10965 			break;
   10966 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10967 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10968 			/* 82579 */
   10969 			new_phytype = WMPHY_82579;
   10970 			break;
   10971 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10972 		case PCI_PRODUCT_INTEL_82801I_BM:
   10973 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10974 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10975 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10976 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10977 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10978 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10979 			/* ICH8, 9, 10 with 82567 */
   10980 			new_phytype = WMPHY_BM;
   10981 			break;
   10982 		default:
   10983 			break;
   10984 		}
   10985 	} else {
   10986 		/* It's not the first call. Use PHY OUI and model */
   10987 		switch (phy_oui) {
   10988 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10989 			switch (phy_model) {
   10990 			case MII_MODEL_ATTANSIC_AR8021:
   10991 				new_phytype = WMPHY_82578;
   10992 				break;
   10993 			default:
   10994 				break;
   10995 			}
   10996 			break;
   10997 		case MII_OUI_xxMARVELL:
   10998 			switch (phy_model) {
   10999 			case MII_MODEL_xxMARVELL_I210:
   11000 				new_phytype = WMPHY_I210;
   11001 				break;
   11002 			case MII_MODEL_xxMARVELL_E1011:
   11003 			case MII_MODEL_xxMARVELL_E1000_3:
   11004 			case MII_MODEL_xxMARVELL_E1000_5:
   11005 			case MII_MODEL_xxMARVELL_E1112:
   11006 				new_phytype = WMPHY_M88;
   11007 				break;
   11008 			case MII_MODEL_xxMARVELL_E1149:
   11009 				new_phytype = WMPHY_BM;
   11010 				break;
   11011 			case MII_MODEL_xxMARVELL_E1111:
   11012 			case MII_MODEL_xxMARVELL_I347:
   11013 			case MII_MODEL_xxMARVELL_E1512:
   11014 			case MII_MODEL_xxMARVELL_E1340M:
   11015 			case MII_MODEL_xxMARVELL_E1543:
   11016 				new_phytype = WMPHY_M88;
   11017 				break;
   11018 			case MII_MODEL_xxMARVELL_I82563:
   11019 				new_phytype = WMPHY_GG82563;
   11020 				break;
   11021 			default:
   11022 				break;
   11023 			}
   11024 			break;
   11025 		case MII_OUI_INTEL:
   11026 			switch (phy_model) {
   11027 			case MII_MODEL_INTEL_I82577:
   11028 				new_phytype = WMPHY_82577;
   11029 				break;
   11030 			case MII_MODEL_INTEL_I82579:
   11031 				new_phytype = WMPHY_82579;
   11032 				break;
   11033 			case MII_MODEL_INTEL_I217:
   11034 				new_phytype = WMPHY_I217;
   11035 				break;
   11036 			case MII_MODEL_INTEL_I82580:
   11037 				new_phytype = WMPHY_82580;
   11038 				break;
   11039 			case MII_MODEL_INTEL_I350:
   11040 				new_phytype = WMPHY_I350;
   11041 				break;
   11042 			default:
   11043 				break;
   11044 			}
   11045 			break;
   11046 		case MII_OUI_yyINTEL:
   11047 			switch (phy_model) {
   11048 			case MII_MODEL_yyINTEL_I82562G:
   11049 			case MII_MODEL_yyINTEL_I82562EM:
   11050 			case MII_MODEL_yyINTEL_I82562ET:
   11051 				new_phytype = WMPHY_IFE;
   11052 				break;
   11053 			case MII_MODEL_yyINTEL_IGP01E1000:
   11054 				new_phytype = WMPHY_IGP;
   11055 				break;
   11056 			case MII_MODEL_yyINTEL_I82566:
   11057 				new_phytype = WMPHY_IGP_3;
   11058 				break;
   11059 			default:
   11060 				break;
   11061 			}
   11062 			break;
   11063 		default:
   11064 			break;
   11065 		}
   11066 
   11067 		if (dodiag) {
   11068 			if (new_phytype == WMPHY_UNKNOWN)
   11069 				aprint_verbose_dev(dev,
   11070 				    "%s: Unknown PHY model. OUI=%06x, "
   11071 				    "model=%04x\n", __func__, phy_oui,
   11072 				    phy_model);
   11073 
   11074 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11075 			    && (sc->sc_phytype != new_phytype)) {
				aprint_error_dev(dev, "Previously assumed PHY "
				    "type (%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
   11079 			}
   11080 		}
   11081 	}
   11082 
   11083 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11084 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11085 		/* SGMII */
   11086 		new_readreg = wm_sgmii_readreg;
   11087 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   11089 		/* BM2 (phyaddr == 1) */
   11090 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11091 		    && (new_phytype != WMPHY_BM)
   11092 		    && (new_phytype != WMPHY_UNKNOWN))
   11093 			doubt_phytype = new_phytype;
   11094 		new_phytype = WMPHY_BM;
   11095 		new_readreg = wm_gmii_bm_readreg;
   11096 		new_writereg = wm_gmii_bm_writereg;
   11097 	} else if (sc->sc_type >= WM_T_PCH) {
   11098 		/* All PCH* use _hv_ */
   11099 		new_readreg = wm_gmii_hv_readreg;
   11100 		new_writereg = wm_gmii_hv_writereg;
   11101 	} else if (sc->sc_type >= WM_T_ICH8) {
   11102 		/* non-82567 ICH8, 9 and 10 */
   11103 		new_readreg = wm_gmii_i82544_readreg;
   11104 		new_writereg = wm_gmii_i82544_writereg;
   11105 	} else if (sc->sc_type >= WM_T_80003) {
   11106 		/* 80003 */
   11107 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11108 		    && (new_phytype != WMPHY_GG82563)
   11109 		    && (new_phytype != WMPHY_UNKNOWN))
   11110 			doubt_phytype = new_phytype;
   11111 		new_phytype = WMPHY_GG82563;
   11112 		new_readreg = wm_gmii_i80003_readreg;
   11113 		new_writereg = wm_gmii_i80003_writereg;
   11114 	} else if (sc->sc_type >= WM_T_I210) {
   11115 		/* I210 and I211 */
   11116 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11117 		    && (new_phytype != WMPHY_I210)
   11118 		    && (new_phytype != WMPHY_UNKNOWN))
   11119 			doubt_phytype = new_phytype;
   11120 		new_phytype = WMPHY_I210;
   11121 		new_readreg = wm_gmii_gs40g_readreg;
   11122 		new_writereg = wm_gmii_gs40g_writereg;
   11123 	} else if (sc->sc_type >= WM_T_82580) {
   11124 		/* 82580, I350 and I354 */
   11125 		new_readreg = wm_gmii_82580_readreg;
   11126 		new_writereg = wm_gmii_82580_writereg;
   11127 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11129 		new_readreg = wm_gmii_i82544_readreg;
   11130 		new_writereg = wm_gmii_i82544_writereg;
   11131 	} else {
   11132 		new_readreg = wm_gmii_i82543_readreg;
   11133 		new_writereg = wm_gmii_i82543_writereg;
   11134 	}
   11135 
   11136 	if (new_phytype == WMPHY_BM) {
   11137 		/* All BM use _bm_ */
   11138 		new_readreg = wm_gmii_bm_readreg;
   11139 		new_writereg = wm_gmii_bm_writereg;
   11140 	}
   11141 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11142 		/* All PCH* use _hv_ */
   11143 		new_readreg = wm_gmii_hv_readreg;
   11144 		new_writereg = wm_gmii_hv_writereg;
   11145 	}
   11146 
   11147 	/* Diag output */
   11148 	if (dodiag) {
   11149 		if (doubt_phytype != WMPHY_UNKNOWN)
   11150 			aprint_error_dev(dev, "Assumed new PHY type was "
   11151 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11152 			    new_phytype);
   11153 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11154 		    && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. New PHY type = %u\n",
   11157 			    sc->sc_phytype, new_phytype);
   11158 
   11159 		if ((mii->mii_readreg != NULL) &&
   11160 		    (new_phytype == WMPHY_UNKNOWN))
   11161 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11162 
   11163 		if ((mii->mii_readreg != NULL) &&
   11164 		    (mii->mii_readreg != new_readreg))
   11165 			aprint_error_dev(dev, "Previously assumed PHY "
   11166 			    "read/write function was incorrect.\n");
   11167 	}
   11168 
   11169 	/* Update now */
   11170 	sc->sc_phytype = new_phytype;
   11171 	mii->mii_readreg = new_readreg;
   11172 	mii->mii_writereg = new_writereg;
   11173 	if (new_readreg == wm_gmii_hv_readreg) {
   11174 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11175 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11176 	} else if (new_readreg == wm_sgmii_readreg) {
   11177 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11178 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11179 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11180 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11181 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11182 	}
   11183 }
   11184 
   11185 /*
   11186  * wm_get_phy_id_82575:
   11187  *
   11188  * Return PHY ID. Return -1 if it failed.
   11189  */
   11190 static int
   11191 wm_get_phy_id_82575(struct wm_softc *sc)
   11192 {
   11193 	uint32_t reg;
   11194 	int phyid = -1;
   11195 
   11196 	/* XXX */
   11197 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11198 		return -1;
   11199 
   11200 	if (wm_sgmii_uses_mdio(sc)) {
   11201 		switch (sc->sc_type) {
   11202 		case WM_T_82575:
   11203 		case WM_T_82576:
   11204 			reg = CSR_READ(sc, WMREG_MDIC);
   11205 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11206 			break;
   11207 		case WM_T_82580:
   11208 		case WM_T_I350:
   11209 		case WM_T_I354:
   11210 		case WM_T_I210:
   11211 		case WM_T_I211:
   11212 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11213 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11214 			break;
   11215 		default:
   11216 			return -1;
   11217 		}
   11218 	}
   11219 
   11220 	return phyid;
   11221 }
   11222 
   11223 /*
   11224  * wm_gmii_mediainit:
   11225  *
   11226  *	Initialize media for use on 1000BASE-T devices.
   11227  */
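/*
 * Rough sequence (illustrative): set TIPG, force CTRL_SLU so the MAC
 * follows the PHY, apply the PCH PHY workarounds, reset the PHY,
 * register the ifmedia callbacks, and then probe for PHYs, retrying
 * with alternative access methods (slow MDIO mode, the BM functions)
 * when nothing attaches.
 */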
   11228 static void
   11229 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11230 {
   11231 	device_t dev = sc->sc_dev;
   11232 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11233 	struct mii_data *mii = &sc->sc_mii;
   11234 
   11235 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11236 		device_xname(sc->sc_dev), __func__));
   11237 
   11238 	/* We have GMII. */
   11239 	sc->sc_flags |= WM_F_HAS_MII;
   11240 
   11241 	if (sc->sc_type == WM_T_80003)
   11242 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11243 	else
   11244 		sc->sc_tipg = TIPG_1000T_DFLT;
   11245 
   11246 	/*
   11247 	 * Let the chip set speed/duplex on its own based on
   11248 	 * signals from the PHY.
   11249 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11250 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11251 	 */
   11252 	sc->sc_ctrl |= CTRL_SLU;
   11253 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11254 
   11255 	/* Initialize our media structures and probe the GMII. */
   11256 	mii->mii_ifp = ifp;
   11257 
   11258 	mii->mii_statchg = wm_gmii_statchg;
   11259 
	/* Get PHY control from SMBus to PCIe */
   11261 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11262 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11263 	    || (sc->sc_type == WM_T_PCH_CNP))
   11264 		wm_init_phy_workarounds_pchlan(sc);
   11265 
   11266 	wm_gmii_reset(sc);
   11267 
   11268 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11269 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11270 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11271 
   11272 	/* Setup internal SGMII PHY for SFP */
   11273 	wm_sgmii_sfp_preconfig(sc);
   11274 
   11275 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11276 	    || (sc->sc_type == WM_T_82580)
   11277 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11278 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11279 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11280 			/* Attach only one port */
   11281 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11282 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11283 		} else {
   11284 			int i, id;
   11285 			uint32_t ctrl_ext;
   11286 
   11287 			id = wm_get_phy_id_82575(sc);
   11288 			if (id != -1) {
   11289 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11290 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11291 			}
   11292 			if ((id == -1)
   11293 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   11295 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11296 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11297 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11298 				CSR_WRITE_FLUSH(sc);
   11299 				delay(300*1000); /* XXX too long */
   11300 
   11301 				/*
				 * Try PHY addresses from 1 to 7.
				 *
				 * I2C access can fail with the I2C register's
				 * ERROR bit set, so suppress error messages
				 * while scanning.
   11307 				 */
   11308 				sc->phy.no_errprint = true;
   11309 				for (i = 1; i < 8; i++)
   11310 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11311 					    0xffffffff, i, MII_OFFSET_ANY,
   11312 					    MIIF_DOPAUSE);
   11313 				sc->phy.no_errprint = false;
   11314 
   11315 				/* Restore previous sfp cage power state */
   11316 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11317 			}
   11318 		}
   11319 	} else
   11320 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11321 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11322 
   11323 	/*
	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   11326 	 */
   11327 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11328 		|| (sc->sc_type == WM_T_PCH_SPT)
   11329 		|| (sc->sc_type == WM_T_PCH_CNP))
   11330 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11331 		wm_set_mdio_slow_mode_hv(sc);
   11332 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11333 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11334 	}
   11335 
   11336 	/*
   11337 	 * (For ICH8 variants)
   11338 	 * If PHY detection failed, use BM's r/w function and retry.
   11339 	 */
   11340 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* If it failed, retry with the *_bm_* functions */
   11342 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11343 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11344 		    sc->sc_phytype);
   11345 		sc->sc_phytype = WMPHY_BM;
   11346 		mii->mii_readreg = wm_gmii_bm_readreg;
   11347 		mii->mii_writereg = wm_gmii_bm_writereg;
   11348 
   11349 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11350 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11351 	}
   11352 
   11353 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   11355 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11356 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11357 		sc->sc_phytype = WMPHY_NONE;
   11358 	} else {
   11359 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11360 
   11361 		/*
		 * A PHY was found! Check the PHY type again with the second
		 * call of wm_gmii_setup_phytype().
   11364 		 */
   11365 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11366 		    child->mii_mpd_model);
   11367 
   11368 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11369 	}
   11370 }
   11371 
   11372 /*
   11373  * wm_gmii_mediachange:	[ifmedia interface function]
   11374  *
   11375  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11376  */
   11377 static int
   11378 wm_gmii_mediachange(struct ifnet *ifp)
   11379 {
   11380 	struct wm_softc *sc = ifp->if_softc;
   11381 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11382 	uint32_t reg;
   11383 	int rc;
   11384 
   11385 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11386 		device_xname(sc->sc_dev), __func__));
   11387 
   11388 	KASSERT(WM_CORE_LOCKED(sc));
   11389 
   11390 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11391 		return 0;
   11392 
   11393 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11394 	if ((sc->sc_type == WM_T_82580)
   11395 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11396 	    || (sc->sc_type == WM_T_I211)) {
   11397 		reg = CSR_READ(sc, WMREG_PHPM);
   11398 		reg &= ~PHPM_GO_LINK_D;
   11399 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11400 	}
   11401 
   11402 	/* Disable D0 LPLU. */
   11403 	wm_lplu_d0_disable(sc);
   11404 
   11405 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11406 	sc->sc_ctrl |= CTRL_SLU;
   11407 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11408 	    || (sc->sc_type > WM_T_82543)) {
   11409 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11410 	} else {
   11411 		sc->sc_ctrl &= ~CTRL_ASDE;
   11412 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11413 		if (ife->ifm_media & IFM_FDX)
   11414 			sc->sc_ctrl |= CTRL_FD;
   11415 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11416 		case IFM_10_T:
   11417 			sc->sc_ctrl |= CTRL_SPEED_10;
   11418 			break;
   11419 		case IFM_100_TX:
   11420 			sc->sc_ctrl |= CTRL_SPEED_100;
   11421 			break;
   11422 		case IFM_1000_T:
   11423 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11424 			break;
   11425 		case IFM_NONE:
   11426 			/* There is no specific setting for IFM_NONE */
   11427 			break;
   11428 		default:
   11429 			panic("wm_gmii_mediachange: bad media 0x%x",
   11430 			    ife->ifm_media);
   11431 		}
   11432 	}
   11433 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11434 	CSR_WRITE_FLUSH(sc);
   11435 
   11436 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11437 		wm_serdes_mediachange(ifp);
   11438 
   11439 	if (sc->sc_type <= WM_T_82543)
   11440 		wm_gmii_reset(sc);
   11441 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11442 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
   11444 		delay(300 * 1000);
   11445 		wm_gmii_reset(sc);
   11446 	}
   11447 
   11448 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11449 		return 0;
   11450 	return rc;
   11451 }
   11452 
   11453 /*
   11454  * wm_gmii_mediastatus:	[ifmedia interface function]
   11455  *
   11456  *	Get the current interface media status on a 1000BASE-T device.
   11457  */
   11458 static void
   11459 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11460 {
   11461 	struct wm_softc *sc = ifp->if_softc;
   11462 
   11463 	KASSERT(WM_CORE_LOCKED(sc));
   11464 
   11465 	ether_mediastatus(ifp, ifmr);
   11466 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11467 	    | sc->sc_flowflags;
   11468 }
   11469 
   11470 #define	MDI_IO		CTRL_SWDPIN(2)
   11471 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11472 #define	MDI_CLK		CTRL_SWDPIN(3)
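/*
 * On the i82543 there is no MDIC register; MII management frames are
 * bit-banged through the software-definable pins in CTRL: MDI_IO carries
 * the MDIO data, MDI_CLK toggles the MDC clock, and MDI_DIR switches the
 * data pin between output (host -> PHY) and input. Each bit cell is
 * paced by the delay(10) calls below.
 */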
   11473 
   11474 static void
   11475 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11476 {
   11477 	uint32_t i, v;
   11478 
   11479 	v = CSR_READ(sc, WMREG_CTRL);
   11480 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11481 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11482 
   11483 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11484 		if (data & i)
   11485 			v |= MDI_IO;
   11486 		else
   11487 			v &= ~MDI_IO;
   11488 		CSR_WRITE(sc, WMREG_CTRL, v);
   11489 		CSR_WRITE_FLUSH(sc);
   11490 		delay(10);
   11491 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11492 		CSR_WRITE_FLUSH(sc);
   11493 		delay(10);
   11494 		CSR_WRITE(sc, WMREG_CTRL, v);
   11495 		CSR_WRITE_FLUSH(sc);
   11496 		delay(10);
   11497 	}
   11498 }
   11499 
   11500 static uint16_t
   11501 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11502 {
   11503 	uint32_t v, i;
   11504 	uint16_t data = 0;
   11505 
   11506 	v = CSR_READ(sc, WMREG_CTRL);
   11507 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11508 	v |= CTRL_SWDPIO(3);
   11509 
   11510 	CSR_WRITE(sc, WMREG_CTRL, v);
   11511 	CSR_WRITE_FLUSH(sc);
   11512 	delay(10);
   11513 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11514 	CSR_WRITE_FLUSH(sc);
   11515 	delay(10);
   11516 	CSR_WRITE(sc, WMREG_CTRL, v);
   11517 	CSR_WRITE_FLUSH(sc);
   11518 	delay(10);
   11519 
   11520 	for (i = 0; i < 16; i++) {
   11521 		data <<= 1;
   11522 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11523 		CSR_WRITE_FLUSH(sc);
   11524 		delay(10);
   11525 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11526 			data |= 1;
   11527 		CSR_WRITE(sc, WMREG_CTRL, v);
   11528 		CSR_WRITE_FLUSH(sc);
   11529 		delay(10);
   11530 	}
   11531 
   11532 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11533 	CSR_WRITE_FLUSH(sc);
   11534 	delay(10);
   11535 	CSR_WRITE(sc, WMREG_CTRL, v);
   11536 	CSR_WRITE_FLUSH(sc);
   11537 	delay(10);
   11538 
   11539 	return data;
   11540 }
   11541 
   11542 #undef MDI_IO
   11543 #undef MDI_DIR
   11544 #undef MDI_CLK
   11545 
   11546 /*
   11547  * wm_gmii_i82543_readreg:	[mii interface function]
   11548  *
   11549  *	Read a PHY register on the GMII (i82543 version).
   11550  */
   11551 static int
   11552 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11553 {
   11554 	struct wm_softc *sc = device_private(dev);
   11555 
   11556 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11557 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11558 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11559 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11560 
   11561 	DPRINTF(sc, WM_DEBUG_GMII,
   11562 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11563 		device_xname(dev), phy, reg, *val));
   11564 
   11565 	return 0;
   11566 }
   11567 
   11568 /*
   11569  * wm_gmii_i82543_writereg:	[mii interface function]
   11570  *
   11571  *	Write a PHY register on the GMII (i82543 version).
   11572  */
   11573 static int
   11574 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11575 {
   11576 	struct wm_softc *sc = device_private(dev);
   11577 
   11578 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11579 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11580 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11581 	    (MII_COMMAND_START << 30), 32);
   11582 
   11583 	return 0;
   11584 }
   11585 
   11586 /*
   11587  * wm_gmii_mdic_readreg:	[mii interface function]
   11588  *
   11589  *	Read a PHY register on the GMII.
   11590  */
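/*
 * A single MDIC transaction, roughly: write the opcode, PHY address and
 * register number to WMREG_MDIC, then poll until MDIC_READY is set.
 * MDIC_E signals an access error, which is normal when probing an
 * address where no PHY responds.
 */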
   11591 static int
   11592 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11593 {
   11594 	struct wm_softc *sc = device_private(dev);
   11595 	uint32_t mdic = 0;
   11596 	int i;
   11597 
   11598 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11599 	    && (reg > MII_ADDRMASK)) {
   11600 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11601 		    __func__, sc->sc_phytype, reg);
   11602 		reg &= MII_ADDRMASK;
   11603 	}
   11604 
   11605 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11606 	    MDIC_REGADD(reg));
   11607 
   11608 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11609 		delay(50);
   11610 		mdic = CSR_READ(sc, WMREG_MDIC);
   11611 		if (mdic & MDIC_READY)
   11612 			break;
   11613 	}
   11614 
   11615 	if ((mdic & MDIC_READY) == 0) {
   11616 		DPRINTF(sc, WM_DEBUG_GMII,
   11617 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11618 			device_xname(dev), phy, reg));
   11619 		return ETIMEDOUT;
   11620 	} else if (mdic & MDIC_E) {
   11621 		/* This is normal if no PHY is present. */
   11622 		DPRINTF(sc, WM_DEBUG_GMII,
   11623 		    ("%s: MDIC read error: phy %d reg %d\n",
   11624 			device_xname(sc->sc_dev), phy, reg));
   11625 		return -1;
   11626 	} else
   11627 		*val = MDIC_DATA(mdic);
   11628 
   11629 	/*
   11630 	 * Allow some time after each MDIC transaction to avoid
   11631 	 * reading duplicate data in the next MDIC transaction.
   11632 	 */
   11633 	if (sc->sc_type == WM_T_PCH2)
   11634 		delay(100);
   11635 
   11636 	return 0;
   11637 }
   11638 
   11639 /*
   11640  * wm_gmii_mdic_writereg:	[mii interface function]
   11641  *
   11642  *	Write a PHY register on the GMII.
   11643  */
   11644 static int
   11645 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11646 {
   11647 	struct wm_softc *sc = device_private(dev);
   11648 	uint32_t mdic = 0;
   11649 	int i;
   11650 
   11651 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11652 	    && (reg > MII_ADDRMASK)) {
   11653 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11654 		    __func__, sc->sc_phytype, reg);
   11655 		reg &= MII_ADDRMASK;
   11656 	}
   11657 
   11658 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11659 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11660 
   11661 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11662 		delay(50);
   11663 		mdic = CSR_READ(sc, WMREG_MDIC);
   11664 		if (mdic & MDIC_READY)
   11665 			break;
   11666 	}
   11667 
   11668 	if ((mdic & MDIC_READY) == 0) {
   11669 		DPRINTF(sc, WM_DEBUG_GMII,
   11670 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11671 			device_xname(dev), phy, reg));
   11672 		return ETIMEDOUT;
   11673 	} else if (mdic & MDIC_E) {
   11674 		DPRINTF(sc, WM_DEBUG_GMII,
   11675 		    ("%s: MDIC write error: phy %d reg %d\n",
   11676 			device_xname(dev), phy, reg));
   11677 		return -1;
   11678 	}
   11679 
   11680 	/*
   11681 	 * Allow some time after each MDIC transaction to avoid
   11682 	 * reading duplicate data in the next MDIC transaction.
   11683 	 */
   11684 	if (sc->sc_type == WM_T_PCH2)
   11685 		delay(100);
   11686 
   11687 	return 0;
   11688 }
   11689 
   11690 /*
   11691  * wm_gmii_i82544_readreg:	[mii interface function]
   11692  *
   11693  *	Read a PHY register on the GMII.
   11694  */
   11695 static int
   11696 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11697 {
   11698 	struct wm_softc *sc = device_private(dev);
   11699 	int rv;
   11700 
   11701 	rv = sc->phy.acquire(sc);
   11702 	if (rv != 0) {
   11703 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11704 		return rv;
   11705 	}
   11706 
   11707 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11708 
   11709 	sc->phy.release(sc);
   11710 
   11711 	return rv;
   11712 }
   11713 
   11714 static int
   11715 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11716 {
   11717 	struct wm_softc *sc = device_private(dev);
   11718 	int rv;
   11719 
   11720 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11721 		switch (sc->sc_phytype) {
   11722 		case WMPHY_IGP:
   11723 		case WMPHY_IGP_2:
   11724 		case WMPHY_IGP_3:
   11725 			rv = wm_gmii_mdic_writereg(dev, phy,
   11726 			    IGPHY_PAGE_SELECT, reg);
   11727 			if (rv != 0)
   11728 				return rv;
   11729 			break;
   11730 		default:
   11731 #ifdef WM_DEBUG
   11732 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11733 			    __func__, sc->sc_phytype, reg);
   11734 #endif
   11735 			break;
   11736 		}
   11737 	}
   11738 
   11739 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11740 }
   11741 
   11742 /*
   11743  * wm_gmii_i82544_writereg:	[mii interface function]
   11744  *
   11745  *	Write a PHY register on the GMII.
   11746  */
   11747 static int
   11748 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11749 {
   11750 	struct wm_softc *sc = device_private(dev);
   11751 	int rv;
   11752 
   11753 	rv = sc->phy.acquire(sc);
   11754 	if (rv != 0) {
   11755 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11756 		return rv;
   11757 	}
   11758 
   11759 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11760 	sc->phy.release(sc);
   11761 
   11762 	return rv;
   11763 }
   11764 
   11765 static int
   11766 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11767 {
   11768 	struct wm_softc *sc = device_private(dev);
   11769 	int rv;
   11770 
   11771 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11772 		switch (sc->sc_phytype) {
   11773 		case WMPHY_IGP:
   11774 		case WMPHY_IGP_2:
   11775 		case WMPHY_IGP_3:
   11776 			rv = wm_gmii_mdic_writereg(dev, phy,
   11777 			    IGPHY_PAGE_SELECT, reg);
   11778 			if (rv != 0)
   11779 				return rv;
   11780 			break;
   11781 		default:
   11782 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11784 			    __func__, sc->sc_phytype, reg);
   11785 #endif
   11786 			break;
   11787 		}
   11788 	}
   11789 
   11790 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11791 }
   11792 
   11793 /*
   11794  * wm_gmii_i80003_readreg:	[mii interface function]
   11795  *
   11796  *	Read a PHY register on the kumeran
   11797  * This could be handled by the PHY layer if we didn't have to lock the
   11798  * resource ...
   11799  */
   11800 static int
   11801 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11802 {
   11803 	struct wm_softc *sc = device_private(dev);
   11804 	int page_select;
   11805 	uint16_t temp, temp2;
   11806 	int rv;
   11807 
   11808 	if (phy != 1) /* Only one PHY on kumeran bus */
   11809 		return -1;
   11810 
   11811 	rv = sc->phy.acquire(sc);
   11812 	if (rv != 0) {
   11813 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11814 		return rv;
   11815 	}
   11816 
   11817 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11818 		page_select = GG82563_PHY_PAGE_SELECT;
   11819 	else {
   11820 		/*
   11821 		 * Use Alternative Page Select register to access registers
   11822 		 * 30 and 31.
   11823 		 */
   11824 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11825 	}
   11826 	temp = reg >> GG82563_PAGE_SHIFT;
   11827 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11828 		goto out;
   11829 
   11830 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11831 		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
   11834 		 */
   11835 		delay(200);
   11836 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11837 		if ((rv != 0) || (temp2 != temp)) {
   11838 			device_printf(dev, "%s failed\n", __func__);
   11839 			rv = -1;
   11840 			goto out;
   11841 		}
   11842 		delay(200);
   11843 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11844 		delay(200);
   11845 	} else
   11846 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11847 
   11848 out:
   11849 	sc->phy.release(sc);
   11850 	return rv;
   11851 }
   11852 
   11853 /*
   11854  * wm_gmii_i80003_writereg:	[mii interface function]
   11855  *
   11856  *	Write a PHY register on the kumeran.
   11857  * This could be handled by the PHY layer if we didn't have to lock the
   11858  * resource ...
   11859  */
   11860 static int
   11861 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11862 {
   11863 	struct wm_softc *sc = device_private(dev);
   11864 	int page_select, rv;
   11865 	uint16_t temp, temp2;
   11866 
   11867 	if (phy != 1) /* Only one PHY on kumeran bus */
   11868 		return -1;
   11869 
   11870 	rv = sc->phy.acquire(sc);
   11871 	if (rv != 0) {
   11872 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11873 		return rv;
   11874 	}
   11875 
   11876 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11877 		page_select = GG82563_PHY_PAGE_SELECT;
   11878 	else {
   11879 		/*
   11880 		 * Use Alternative Page Select register to access registers
   11881 		 * 30 and 31.
   11882 		 */
   11883 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11884 	}
   11885 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11886 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11887 		goto out;
   11888 
   11889 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11890 		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
   11893 		 */
   11894 		delay(200);
   11895 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11896 		if ((rv != 0) || (temp2 != temp)) {
   11897 			device_printf(dev, "%s failed\n", __func__);
   11898 			rv = -1;
   11899 			goto out;
   11900 		}
   11901 		delay(200);
   11902 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11903 		delay(200);
   11904 	} else
   11905 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11906 
   11907 out:
   11908 	sc->phy.release(sc);
   11909 	return rv;
   11910 }
   11911 
   11912 /*
   11913  * wm_gmii_bm_readreg:	[mii interface function]
   11914  *
 *	Read a PHY register on a BM PHY.
   11916  * This could be handled by the PHY layer if we didn't have to lock the
   11917  * resource ...
   11918  */
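/*
 * For BM PHYs the reg argument encodes both a page and a register
 * number; BME1000_PAGE_SHIFT splits the two apart below. Page 800
 * (BM_WUC_PAGE) is special-cased and routed to
 * wm_access_phy_wakeup_reg_bm().
 */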
   11919 static int
   11920 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11921 {
   11922 	struct wm_softc *sc = device_private(dev);
   11923 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11924 	int rv;
   11925 
   11926 	rv = sc->phy.acquire(sc);
   11927 	if (rv != 0) {
   11928 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11929 		return rv;
   11930 	}
   11931 
   11932 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11933 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11934 		    || (reg == 31)) ? 1 : phy;
   11935 	/* Page 800 works differently than the rest so it has its own func */
   11936 	if (page == BM_WUC_PAGE) {
   11937 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11938 		goto release;
   11939 	}
   11940 
   11941 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11942 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11943 		    && (sc->sc_type != WM_T_82583))
   11944 			rv = wm_gmii_mdic_writereg(dev, phy,
   11945 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11946 		else
   11947 			rv = wm_gmii_mdic_writereg(dev, phy,
   11948 			    BME1000_PHY_PAGE_SELECT, page);
   11949 		if (rv != 0)
   11950 			goto release;
   11951 	}
   11952 
   11953 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11954 
   11955 release:
   11956 	sc->phy.release(sc);
   11957 	return rv;
   11958 }
   11959 
   11960 /*
   11961  * wm_gmii_bm_writereg:	[mii interface function]
   11962  *
 *	Write a PHY register on a BM PHY.
   11964  * This could be handled by the PHY layer if we didn't have to lock the
   11965  * resource ...
   11966  */
   11967 static int
   11968 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11969 {
   11970 	struct wm_softc *sc = device_private(dev);
   11971 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11972 	int rv;
   11973 
   11974 	rv = sc->phy.acquire(sc);
   11975 	if (rv != 0) {
   11976 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11977 		return rv;
   11978 	}
   11979 
   11980 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11981 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11982 		    || (reg == 31)) ? 1 : phy;
   11983 	/* Page 800 works differently than the rest so it has its own func */
   11984 	if (page == BM_WUC_PAGE) {
   11985 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11986 		goto release;
   11987 	}
   11988 
   11989 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11990 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11991 		    && (sc->sc_type != WM_T_82583))
   11992 			rv = wm_gmii_mdic_writereg(dev, phy,
   11993 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11994 		else
   11995 			rv = wm_gmii_mdic_writereg(dev, phy,
   11996 			    BME1000_PHY_PAGE_SELECT, page);
   11997 		if (rv != 0)
   11998 			goto release;
   11999 	}
   12000 
   12001 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12002 
   12003 release:
   12004 	sc->phy.release(sc);
   12005 	return rv;
   12006 }
   12007 
   12008 /*
   12009  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   12010  *  @dev: pointer to the HW structure
 *  @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
   12012  *
   12013  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   12014  *  address to store contents of the BM_WUC_ENABLE_REG register.
   12015  */
   12016 static int
   12017 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12018 {
   12019 #ifdef WM_DEBUG
   12020 	struct wm_softc *sc = device_private(dev);
   12021 #endif
   12022 	uint16_t temp;
   12023 	int rv;
   12024 
   12025 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12026 		device_xname(dev), __func__));
   12027 
   12028 	if (!phy_regp)
   12029 		return -1;
   12030 
   12031 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12032 
   12033 	/* Select Port Control Registers page */
   12034 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12035 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12036 	if (rv != 0)
   12037 		return rv;
   12038 
   12039 	/* Read WUCE and save it */
   12040 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12041 	if (rv != 0)
   12042 		return rv;
   12043 
	/*
	 * Enable both PHY wakeup mode and Wakeup register page writes.
	 * Prevent a power state change by disabling ME and Host PHY wakeup.
	 */
   12047 	temp = *phy_regp;
   12048 	temp |= BM_WUC_ENABLE_BIT;
   12049 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12050 
   12051 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12052 		return rv;
   12053 
	/*
	 * Select the Host Wakeup Registers page; the caller is now able
	 * to write registers on the Wakeup registers page.
	 */
   12057 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12058 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12059 }
   12060 
   12061 /*
   12062  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12063  *  @dev: pointer to the HW structure
 *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
   12065  *
   12066  *  Restore BM_WUC_ENABLE_REG to its original value.
   12067  *
   12068  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12069  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12070  *  caller.
   12071  */
   12072 static int
   12073 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12074 {
   12075 #ifdef WM_DEBUG
   12076 	struct wm_softc *sc = device_private(dev);
   12077 #endif
   12078 
   12079 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12080 		device_xname(dev), __func__));
   12081 
   12082 	if (!phy_regp)
   12083 		return -1;
   12084 
   12085 	/* Select Port Control Registers page */
   12086 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12087 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12088 
   12089 	/* Restore 769.17 to its original value */
   12090 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12091 
   12092 	return 0;
   12093 }
   12094 
   12095 /*
   12096  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 *  @dev: pointer to the HW structure
   12098  *  @offset: register offset to be read or written
   12099  *  @val: pointer to the data to read or write
   12100  *  @rd: determines if operation is read or write
   12101  *  @page_set: BM_WUC_PAGE already set and access enabled
   12102  *
   12103  *  Read the PHY register at offset and store the retrieved information in
   12104  *  data, or write data to PHY register at offset.  Note the procedure to
   12105  *  access the PHY wakeup registers is different than reading the other PHY
   12106  *  registers. It works as such:
   12107  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 if it were the manageability engine)
   12109  *  3) Write the address using the address opcode (0x11)
   12110  *  4) Read or write the data using the data opcode (0x12)
   12111  *  5) Restore 769.17.2 to its original value
   12112  *
   12113  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12114  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12115  *
   12116  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12117  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   12118  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   12119  */
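/*
 * Illustrative: accesses reach this function from wm_gmii_bm_readreg()
 * and wm_gmii_hv_readreg_locked() (and their write counterparts) once
 * they detect page == BM_WUC_PAGE in the encoded register value.
 */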
   12120 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12122 	bool page_set)
   12123 {
   12124 	struct wm_softc *sc = device_private(dev);
   12125 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12126 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12127 	uint16_t wuce;
   12128 	int rv = 0;
   12129 
   12130 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12131 		device_xname(dev), __func__));
   12132 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12133 	if ((sc->sc_type == WM_T_PCH)
   12134 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12135 		device_printf(dev,
   12136 		    "Attempting to access page %d while gig enabled.\n", page);
   12137 	}
   12138 
   12139 	if (!page_set) {
   12140 		/* Enable access to PHY wakeup registers */
   12141 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12142 		if (rv != 0) {
   12143 			device_printf(dev,
   12144 			    "%s: Could not enable PHY wakeup reg access\n",
   12145 			    __func__);
   12146 			return rv;
   12147 		}
   12148 	}
   12149 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12150 		device_xname(sc->sc_dev), __func__, page, regnum));
   12151 
   12152 	/*
	 * 3) and 4) Access the PHY wakeup register: write the register
	 * offset, then read or write the data (see the function comment).
   12155 	 */
   12156 
   12157 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12158 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12159 	if (rv != 0)
   12160 		return rv;
   12161 
   12162 	if (rd) {
   12163 		/* Read the Wakeup register page value using opcode 0x12 */
   12164 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12165 	} else {
   12166 		/* Write the Wakeup register page value using opcode 0x12 */
   12167 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12168 	}
   12169 	if (rv != 0)
   12170 		return rv;
   12171 
   12172 	if (!page_set)
   12173 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12174 
   12175 	return rv;
   12176 }
   12177 
   12178 /*
   12179  * wm_gmii_hv_readreg:	[mii interface function]
   12180  *
 *	Read a PHY register on an HV PHY (PCH and newer).
   12182  * This could be handled by the PHY layer if we didn't have to lock the
   12183  * resource ...
   12184  */
   12185 static int
   12186 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12187 {
   12188 	struct wm_softc *sc = device_private(dev);
   12189 	int rv;
   12190 
   12191 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12192 		device_xname(dev), __func__));
   12193 
   12194 	rv = sc->phy.acquire(sc);
   12195 	if (rv != 0) {
   12196 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12197 		return rv;
   12198 	}
   12199 
   12200 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12201 	sc->phy.release(sc);
   12202 	return rv;
   12203 }
   12204 
   12205 static int
   12206 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12207 {
   12208 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12209 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12210 	int rv;
   12211 
   12212 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12213 
   12214 	/* Page 800 works differently than the rest so it has its own func */
   12215 	if (page == BM_WUC_PAGE)
   12216 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12217 
   12218 	/*
	 * Pages between 1 and 767 work differently than the rest and are
	 * not handled here.
   12221 	 */
   12222 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   12224 		return -1;
   12225 	}
   12226 
   12227 	/*
   12228 	 * XXX I21[789] documents say that the SMBus Address register is at
   12229 	 * PHY address 01, Page 0 (not 768), Register 26.
   12230 	 */
   12231 	if (page == HV_INTC_FC_PAGE_START)
   12232 		page = 0;
   12233 
   12234 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12235 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12236 		    page << BME1000_PAGE_SHIFT);
   12237 		if (rv != 0)
   12238 			return rv;
   12239 	}
   12240 
   12241 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12242 }
   12243 
   12244 /*
   12245  * wm_gmii_hv_writereg:	[mii interface function]
   12246  *
 *	Write a PHY register on an HV PHY (PCH and newer).
   12248  * This could be handled by the PHY layer if we didn't have to lock the
   12249  * resource ...
   12250  */
   12251 static int
   12252 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12253 {
   12254 	struct wm_softc *sc = device_private(dev);
   12255 	int rv;
   12256 
   12257 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12258 		device_xname(dev), __func__));
   12259 
   12260 	rv = sc->phy.acquire(sc);
   12261 	if (rv != 0) {
   12262 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12263 		return rv;
   12264 	}
   12265 
   12266 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12267 	sc->phy.release(sc);
   12268 
   12269 	return rv;
   12270 }
   12271 
   12272 static int
   12273 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12274 {
   12275 	struct wm_softc *sc = device_private(dev);
   12276 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12277 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12278 	int rv;
   12279 
   12280 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12281 
   12282 	/* Page 800 works differently than the rest so it has its own func */
   12283 	if (page == BM_WUC_PAGE)
   12284 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12285 		    false);
   12286 
   12287 	/*
	 * Pages between 1 and 767 work differently than the rest and are
	 * not handled here.
   12290 	 */
   12291 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   12293 		return -1;
   12294 	}
   12295 
   12296 	{
   12297 		/*
   12298 		 * XXX I21[789] documents say that the SMBus Address register
   12299 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12300 		 */
   12301 		if (page == HV_INTC_FC_PAGE_START)
   12302 			page = 0;
   12303 
   12304 		/*
   12305 		 * XXX Workaround MDIO accesses being disabled after entering
   12306 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12307 		 * register is set)
   12308 		 */
   12309 		if (sc->sc_phytype == WMPHY_82578) {
   12310 			struct mii_softc *child;
   12311 
   12312 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12313 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12314 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12315 			    && ((val & (1 << 11)) != 0)) {
   12316 				device_printf(dev, "XXX need workaround\n");
   12317 			}
   12318 		}
   12319 
   12320 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12321 			rv = wm_gmii_mdic_writereg(dev, 1,
   12322 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12323 			if (rv != 0)
   12324 				return rv;
   12325 		}
   12326 	}
   12327 
   12328 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12329 }
   12330 
   12331 /*
   12332  * wm_gmii_82580_readreg:	[mii interface function]
   12333  *
   12334  *	Read a PHY register on the 82580 and I350.
   12335  * This could be handled by the PHY layer if we didn't have to lock the
   12336  * resource ...
   12337  */
   12338 static int
   12339 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12340 {
   12341 	struct wm_softc *sc = device_private(dev);
   12342 	int rv;
   12343 
   12344 	rv = sc->phy.acquire(sc);
   12345 	if (rv != 0) {
   12346 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12347 		return rv;
   12348 	}
   12349 
   12350 #ifdef DIAGNOSTIC
   12351 	if (reg > MII_ADDRMASK) {
   12352 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12353 		    __func__, sc->sc_phytype, reg);
   12354 		reg &= MII_ADDRMASK;
   12355 	}
   12356 #endif
   12357 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12358 
   12359 	sc->phy.release(sc);
   12360 	return rv;
   12361 }
   12362 
   12363 /*
   12364  * wm_gmii_82580_writereg:	[mii interface function]
   12365  *
   12366  *	Write a PHY register on the 82580 and I350.
   12367  * This could be handled by the PHY layer if we didn't have to lock the
   12368  * resource ...
   12369  */
   12370 static int
   12371 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12372 {
   12373 	struct wm_softc *sc = device_private(dev);
   12374 	int rv;
   12375 
   12376 	rv = sc->phy.acquire(sc);
   12377 	if (rv != 0) {
   12378 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12379 		return rv;
   12380 	}
   12381 
   12382 #ifdef DIAGNOSTIC
   12383 	if (reg > MII_ADDRMASK) {
   12384 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12385 		    __func__, sc->sc_phytype, reg);
   12386 		reg &= MII_ADDRMASK;
   12387 	}
   12388 #endif
   12389 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12390 
   12391 	sc->phy.release(sc);
   12392 	return rv;
   12393 }
   12394 
   12395 /*
   12396  * wm_gmii_gs40g_readreg:	[mii interface function]
   12397  *
 *	Read a PHY register on the I210 and I211.
   12399  * This could be handled by the PHY layer if we didn't have to lock the
   12400  * resource ...
   12401  */
   12402 static int
   12403 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12404 {
   12405 	struct wm_softc *sc = device_private(dev);
   12406 	int page, offset;
   12407 	int rv;
   12408 
   12409 	/* Acquire semaphore */
   12410 	rv = sc->phy.acquire(sc);
   12411 	if (rv != 0) {
   12412 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12413 		return rv;
   12414 	}
   12415 
   12416 	/* Page select */
   12417 	page = reg >> GS40G_PAGE_SHIFT;
   12418 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12419 	if (rv != 0)
   12420 		goto release;
   12421 
   12422 	/* Read reg */
   12423 	offset = reg & GS40G_OFFSET_MASK;
   12424 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12425 
   12426 release:
   12427 	sc->phy.release(sc);
   12428 	return rv;
   12429 }
   12430 
   12431 /*
   12432  * wm_gmii_gs40g_writereg:	[mii interface function]
   12433  *
   12434  *	Write a PHY register on the I210 and I211.
   12435  * This could be handled by the PHY layer if we didn't have to lock the
   12436  * resource ...
   12437  */
   12438 static int
   12439 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12440 {
   12441 	struct wm_softc *sc = device_private(dev);
   12442 	uint16_t page;
   12443 	int offset, rv;
   12444 
   12445 	/* Acquire semaphore */
   12446 	rv = sc->phy.acquire(sc);
   12447 	if (rv != 0) {
   12448 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12449 		return rv;
   12450 	}
   12451 
   12452 	/* Page select */
   12453 	page = reg >> GS40G_PAGE_SHIFT;
   12454 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12455 	if (rv != 0)
   12456 		goto release;
   12457 
   12458 	/* Write reg */
   12459 	offset = reg & GS40G_OFFSET_MASK;
   12460 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12461 
   12462 release:
   12463 	/* Release semaphore */
   12464 	sc->phy.release(sc);
   12465 	return rv;
   12466 }
   12467 
   12468 /*
   12469  * wm_gmii_statchg:	[mii interface function]
   12470  *
   12471  *	Callback from MII layer when media changes.
   12472  */
   12473 static void
   12474 wm_gmii_statchg(struct ifnet *ifp)
   12475 {
   12476 	struct wm_softc *sc = ifp->if_softc;
   12477 	struct mii_data *mii = &sc->sc_mii;
   12478 
   12479 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12480 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12481 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12482 
   12483 	/* Get flow control negotiation result. */
   12484 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12485 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12486 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12487 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12488 	}
   12489 
   12490 	if (sc->sc_flowflags & IFM_FLOW) {
   12491 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12492 			sc->sc_ctrl |= CTRL_TFCE;
   12493 			sc->sc_fcrtl |= FCRTL_XONE;
   12494 		}
   12495 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12496 			sc->sc_ctrl |= CTRL_RFCE;
   12497 	}
   12498 
   12499 	if (mii->mii_media_active & IFM_FDX) {
   12500 		DPRINTF(sc, WM_DEBUG_LINK,
   12501 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12502 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12503 	} else {
   12504 		DPRINTF(sc, WM_DEBUG_LINK,
   12505 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12506 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12507 	}
   12508 
   12509 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12510 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12511 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   12512 						 : WMREG_FCRTL, sc->sc_fcrtl);
   12513 	if (sc->sc_type == WM_T_80003) {
   12514 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12515 		case IFM_1000_T:
   12516 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12517 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   12518 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   12519 			break;
   12520 		default:
   12521 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12522 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    12523 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12524 			break;
   12525 		}
   12526 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12527 	}
   12528 }
   12529 
   12530 /* kumeran related (80003, ICH* and PCH*) */
   12531 
   12532 /*
   12533  * wm_kmrn_readreg:
   12534  *
   12535  *	Read a kumeran register
   12536  */
   12537 static int
   12538 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12539 {
   12540 	int rv;
   12541 
   12542 	if (sc->sc_type == WM_T_80003)
   12543 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12544 	else
   12545 		rv = sc->phy.acquire(sc);
   12546 	if (rv != 0) {
   12547 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12548 		    __func__);
   12549 		return rv;
   12550 	}
   12551 
   12552 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12553 
   12554 	if (sc->sc_type == WM_T_80003)
   12555 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12556 	else
   12557 		sc->phy.release(sc);
   12558 
   12559 	return rv;
   12560 }
   12561 
   12562 static int
   12563 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12564 {
   12565 
   12566 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12567 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12568 	    KUMCTRLSTA_REN);
   12569 	CSR_WRITE_FLUSH(sc);
   12570 	delay(2);
   12571 
   12572 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12573 
   12574 	return 0;
   12575 }
   12576 
   12577 /*
   12578  * wm_kmrn_writereg:
   12579  *
   12580  *	Write a kumeran register
   12581  */
   12582 static int
   12583 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12584 {
   12585 	int rv;
   12586 
   12587 	if (sc->sc_type == WM_T_80003)
   12588 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12589 	else
   12590 		rv = sc->phy.acquire(sc);
   12591 	if (rv != 0) {
   12592 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12593 		    __func__);
   12594 		return rv;
   12595 	}
   12596 
   12597 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12598 
   12599 	if (sc->sc_type == WM_T_80003)
   12600 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12601 	else
   12602 		sc->phy.release(sc);
   12603 
   12604 	return rv;
   12605 }
   12606 
   12607 static int
   12608 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12609 {
   12610 
   12611 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12612 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12613 
   12614 	return 0;
   12615 }
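
/*
 * Illustrative sketch (not driver code) of the KUMCTRLSTA encoding used
 * by the two locked helpers above: the Kumeran register offset sits
 * behind KUMCTRLSTA_OFFSET_SHIFT masked by KUMCTRLSTA_OFFSET,
 * KUMCTRLSTA_REN requests a read, and the 16-bit payload travels in the
 * low half covered by KUMCTRLSTA_MASK.  The ex_* helpers are
 * assumptions for illustration only.
 */
#if 0
static inline uint32_t
ex_kmrn_read_cmd(int reg)
{

	/* Select the register and set read-enable; data comes back low. */
	return (((uint32_t)reg << KUMCTRLSTA_OFFSET_SHIFT) &
	    KUMCTRLSTA_OFFSET) | KUMCTRLSTA_REN;
}

static inline uint32_t
ex_kmrn_write_cmd(int reg, uint16_t val)
{

	/* Same offset encoding; REN clear means write, payload goes low. */
	return (((uint32_t)reg << KUMCTRLSTA_OFFSET_SHIFT) &
	    KUMCTRLSTA_OFFSET) | val;
}
#endif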
   12616 
   12617 /*
    12618  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   12619  * This access method is different from IEEE MMD.
   12620  */
   12621 static int
   12622 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12623 {
   12624 	struct wm_softc *sc = device_private(dev);
   12625 	int rv;
   12626 
   12627 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12628 	if (rv != 0)
   12629 		return rv;
   12630 
   12631 	if (rd)
   12632 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12633 	else
   12634 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12635 	return rv;
   12636 }
   12637 
   12638 static int
   12639 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12640 {
   12641 
   12642 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12643 }
   12644 
   12645 static int
   12646 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12647 {
   12648 
   12649 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12650 }
   12651 
   12652 /* SGMII related */
   12653 
   12654 /*
   12655  * wm_sgmii_uses_mdio
   12656  *
   12657  * Check whether the transaction is to the internal PHY or the external
   12658  * MDIO interface. Return true if it's MDIO.
   12659  */
   12660 static bool
   12661 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12662 {
   12663 	uint32_t reg;
   12664 	bool ismdio = false;
   12665 
   12666 	switch (sc->sc_type) {
   12667 	case WM_T_82575:
   12668 	case WM_T_82576:
   12669 		reg = CSR_READ(sc, WMREG_MDIC);
   12670 		ismdio = ((reg & MDIC_DEST) != 0);
   12671 		break;
   12672 	case WM_T_82580:
   12673 	case WM_T_I350:
   12674 	case WM_T_I354:
   12675 	case WM_T_I210:
   12676 	case WM_T_I211:
   12677 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12678 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12679 		break;
   12680 	default:
   12681 		break;
   12682 	}
   12683 
   12684 	return ismdio;
   12685 }
   12686 
   12687 /* Setup internal SGMII PHY for SFP */
   12688 static void
   12689 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12690 {
   12691 	uint16_t id1, id2, phyreg;
   12692 	int i, rv;
   12693 
   12694 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12695 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12696 		return;
   12697 
   12698 	for (i = 0; i < MII_NPHY; i++) {
   12699 		sc->phy.no_errprint = true;
   12700 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12701 		if (rv != 0)
   12702 			continue;
   12703 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12704 		if (rv != 0)
   12705 			continue;
   12706 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12707 			continue;
   12708 		sc->phy.no_errprint = false;
   12709 
   12710 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12711 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12712 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12713 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12714 		break;
   12715 	}
   12716 
   12717 }
   12718 
   12719 /*
   12720  * wm_sgmii_readreg:	[mii interface function]
   12721  *
   12722  *	Read a PHY register on the SGMII
   12723  * This could be handled by the PHY layer if we didn't have to lock the
   12724  * resource ...
   12725  */
   12726 static int
   12727 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12728 {
   12729 	struct wm_softc *sc = device_private(dev);
   12730 	int rv;
   12731 
   12732 	rv = sc->phy.acquire(sc);
   12733 	if (rv != 0) {
   12734 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12735 		return rv;
   12736 	}
   12737 
   12738 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12739 
   12740 	sc->phy.release(sc);
   12741 	return rv;
   12742 }
   12743 
   12744 static int
   12745 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12746 {
   12747 	struct wm_softc *sc = device_private(dev);
   12748 	uint32_t i2ccmd;
   12749 	int i, rv = 0;
   12750 
   12751 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12752 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12753 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12754 
   12755 	/* Poll the ready bit */
   12756 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12757 		delay(50);
   12758 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12759 		if (i2ccmd & I2CCMD_READY)
   12760 			break;
   12761 	}
   12762 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12763 		device_printf(dev, "I2CCMD Read did not complete\n");
   12764 		rv = ETIMEDOUT;
   12765 	}
   12766 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12767 		if (!sc->phy.no_errprint)
   12768 			device_printf(dev, "I2CCMD Error bit set\n");
   12769 		rv = EIO;
   12770 	}
   12771 
   12772 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12773 
   12774 	return rv;
   12775 }
   12776 
   12777 /*
   12778  * wm_sgmii_writereg:	[mii interface function]
   12779  *
   12780  *	Write a PHY register on the SGMII.
   12781  * This could be handled by the PHY layer if we didn't have to lock the
   12782  * resource ...
   12783  */
   12784 static int
   12785 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12786 {
   12787 	struct wm_softc *sc = device_private(dev);
   12788 	int rv;
   12789 
   12790 	rv = sc->phy.acquire(sc);
   12791 	if (rv != 0) {
   12792 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12793 		return rv;
   12794 	}
   12795 
   12796 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12797 
   12798 	sc->phy.release(sc);
   12799 
   12800 	return rv;
   12801 }
   12802 
   12803 static int
   12804 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12805 {
   12806 	struct wm_softc *sc = device_private(dev);
   12807 	uint32_t i2ccmd;
   12808 	uint16_t swapdata;
   12809 	int rv = 0;
   12810 	int i;
   12811 
   12812 	/* Swap the data bytes for the I2C interface */
   12813 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12814 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12815 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12816 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12817 
   12818 	/* Poll the ready bit */
   12819 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12820 		delay(50);
   12821 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12822 		if (i2ccmd & I2CCMD_READY)
   12823 			break;
   12824 	}
   12825 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12826 		device_printf(dev, "I2CCMD Write did not complete\n");
   12827 		rv = ETIMEDOUT;
   12828 	}
   12829 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12830 		device_printf(dev, "I2CCMD Error bit set\n");
   12831 		rv = EIO;
   12832 	}
   12833 
   12834 	return rv;
   12835 }
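
/*
 * Self-contained sketch (not driver code) of the 16-bit byte swap both
 * I2CCMD paths above perform: the I2CCMD data field is little-endian,
 * while the two bytes cross the I2C bus most significant byte first, so
 * values are swapped in both directions.  ex_i2c_swap16() is an
 * illustration only.
 */
#if 0
static inline uint16_t
ex_i2c_swap16(uint16_t v)
{

	/* 0x1234 in CPU order <-> 0x3412 in I2CCMD wire order. */
	return (uint16_t)(((v >> 8) & 0x00ff) | ((v << 8) & 0xff00));
}
#endif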
   12836 
   12837 /* TBI related */
   12838 
   12839 static bool
   12840 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12841 {
   12842 	bool sig;
   12843 
   12844 	sig = ctrl & CTRL_SWDPIN(1);
   12845 
   12846 	/*
   12847 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12848 	 * detect a signal, 1 if they don't.
   12849 	 */
   12850 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12851 		sig = !sig;
   12852 
   12853 	return sig;
   12854 }
   12855 
   12856 /*
   12857  * wm_tbi_mediainit:
   12858  *
   12859  *	Initialize media for use on 1000BASE-X devices.
   12860  */
   12861 static void
   12862 wm_tbi_mediainit(struct wm_softc *sc)
   12863 {
   12864 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12865 	const char *sep = "";
   12866 
   12867 	if (sc->sc_type < WM_T_82543)
   12868 		sc->sc_tipg = TIPG_WM_DFLT;
   12869 	else
   12870 		sc->sc_tipg = TIPG_LG_DFLT;
   12871 
   12872 	sc->sc_tbi_serdes_anegticks = 5;
   12873 
   12874 	/* Initialize our media structures */
   12875 	sc->sc_mii.mii_ifp = ifp;
   12876 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12877 
   12878 	ifp->if_baudrate = IF_Gbps(1);
   12879 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12880 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12881 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12882 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12883 		    sc->sc_core_lock);
   12884 	} else {
   12885 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12886 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12887 	}
   12888 
   12889 	/*
   12890 	 * SWD Pins:
   12891 	 *
   12892 	 *	0 = Link LED (output)
   12893 	 *	1 = Loss Of Signal (input)
   12894 	 */
   12895 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12896 
   12897 	/* XXX Perhaps this is only for TBI */
   12898 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12899 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12900 
   12901 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12902 		sc->sc_ctrl &= ~CTRL_LRST;
   12903 
   12904 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12905 
   12906 #define	ADD(ss, mm, dd)							  \
   12907 do {									  \
   12908 	aprint_normal("%s%s", sep, ss);					  \
   12909 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12910 	sep = ", ";							  \
   12911 } while (/*CONSTCOND*/0)
   12912 
   12913 	aprint_normal_dev(sc->sc_dev, "");
   12914 
   12915 	if (sc->sc_type == WM_T_I354) {
   12916 		uint32_t status;
   12917 
   12918 		status = CSR_READ(sc, WMREG_STATUS);
   12919 		if (((status & STATUS_2P5_SKU) != 0)
   12920 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12921 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12922 		} else
   12923 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12924 	} else if (sc->sc_type == WM_T_82545) {
   12925 		/* Only 82545 is LX (XXX except SFP) */
   12926 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12927 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12928 	} else if (sc->sc_sfptype != 0) {
   12929 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12930 		switch (sc->sc_sfptype) {
   12931 		default:
   12932 		case SFF_SFP_ETH_FLAGS_1000SX:
   12933 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12934 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12935 			break;
   12936 		case SFF_SFP_ETH_FLAGS_1000LX:
   12937 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12938 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12939 			break;
   12940 		case SFF_SFP_ETH_FLAGS_1000CX:
   12941 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12942 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12943 			break;
   12944 		case SFF_SFP_ETH_FLAGS_1000T:
   12945 			ADD("1000baseT", IFM_1000_T, 0);
   12946 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12947 			break;
   12948 		case SFF_SFP_ETH_FLAGS_100FX:
   12949 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12950 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12951 			break;
   12952 		}
   12953 	} else {
   12954 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12955 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12956 	}
   12957 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12958 	aprint_normal("\n");
   12959 
   12960 #undef ADD
   12961 
   12962 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12963 }
   12964 
   12965 /*
   12966  * wm_tbi_mediachange:	[ifmedia interface function]
   12967  *
   12968  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12969  */
   12970 static int
   12971 wm_tbi_mediachange(struct ifnet *ifp)
   12972 {
   12973 	struct wm_softc *sc = ifp->if_softc;
   12974 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12975 	uint32_t status, ctrl;
   12976 	bool signal;
   12977 	int i;
   12978 
   12979 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12980 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12981 		/* XXX need some work for >= 82571 and < 82575 */
   12982 		if (sc->sc_type < WM_T_82575)
   12983 			return 0;
   12984 	}
   12985 
   12986 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12987 	    || (sc->sc_type >= WM_T_82575))
   12988 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12989 
   12990 	sc->sc_ctrl &= ~CTRL_LRST;
   12991 	sc->sc_txcw = TXCW_ANE;
   12992 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12993 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12994 	else if (ife->ifm_media & IFM_FDX)
   12995 		sc->sc_txcw |= TXCW_FD;
   12996 	else
   12997 		sc->sc_txcw |= TXCW_HD;
   12998 
   12999 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   13000 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   13001 
   13002 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   13003 		device_xname(sc->sc_dev), sc->sc_txcw));
   13004 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13005 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13006 	CSR_WRITE_FLUSH(sc);
   13007 	delay(1000);
   13008 
   13009 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13010 	signal = wm_tbi_havesignal(sc, ctrl);
   13011 
   13012 	DPRINTF(sc, WM_DEBUG_LINK,
   13013 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13014 
   13015 	if (signal) {
   13016 		/* Have signal; wait for the link to come up. */
   13017 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13018 			delay(10000);
   13019 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13020 				break;
   13021 		}
   13022 
   13023 		DPRINTF(sc, WM_DEBUG_LINK,
   13024 		    ("%s: i = %d after waiting for link\n",
   13025 			device_xname(sc->sc_dev), i));
   13026 
   13027 		status = CSR_READ(sc, WMREG_STATUS);
   13028 		DPRINTF(sc, WM_DEBUG_LINK,
   13029 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13030 			__PRIxBIT "\n",
   13031 			device_xname(sc->sc_dev), status, STATUS_LU));
   13032 		if (status & STATUS_LU) {
   13033 			/* Link is up. */
   13034 			DPRINTF(sc, WM_DEBUG_LINK,
   13035 			    ("%s: LINK: set media -> link up %s\n",
   13036 				device_xname(sc->sc_dev),
   13037 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13038 
   13039 			/*
    13040 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    13041 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   13042 			 */
   13043 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13044 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13045 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13046 			if (status & STATUS_FD)
   13047 				sc->sc_tctl |=
   13048 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13049 			else
   13050 				sc->sc_tctl |=
   13051 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13052 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13053 				sc->sc_fcrtl |= FCRTL_XONE;
   13054 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13055 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13056 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13057 			sc->sc_tbi_linkup = 1;
   13058 		} else {
   13059 			if (i == WM_LINKUP_TIMEOUT)
   13060 				wm_check_for_link(sc);
   13061 			/* Link is down. */
   13062 			DPRINTF(sc, WM_DEBUG_LINK,
   13063 			    ("%s: LINK: set media -> link down\n",
   13064 				device_xname(sc->sc_dev)));
   13065 			sc->sc_tbi_linkup = 0;
   13066 		}
   13067 	} else {
   13068 		DPRINTF(sc, WM_DEBUG_LINK,
   13069 		    ("%s: LINK: set media -> no signal\n",
   13070 			device_xname(sc->sc_dev)));
   13071 		sc->sc_tbi_linkup = 0;
   13072 	}
   13073 
   13074 	wm_tbi_serdes_set_linkled(sc);
   13075 
   13076 	return 0;
   13077 }
   13078 
   13079 /*
   13080  * wm_tbi_mediastatus:	[ifmedia interface function]
   13081  *
   13082  *	Get the current interface media status on a 1000BASE-X device.
   13083  */
   13084 static void
   13085 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13086 {
   13087 	struct wm_softc *sc = ifp->if_softc;
   13088 	uint32_t ctrl, status;
   13089 
   13090 	ifmr->ifm_status = IFM_AVALID;
   13091 	ifmr->ifm_active = IFM_ETHER;
   13092 
   13093 	status = CSR_READ(sc, WMREG_STATUS);
   13094 	if ((status & STATUS_LU) == 0) {
   13095 		ifmr->ifm_active |= IFM_NONE;
   13096 		return;
   13097 	}
   13098 
   13099 	ifmr->ifm_status |= IFM_ACTIVE;
   13100 	/* Only 82545 is LX */
   13101 	if (sc->sc_type == WM_T_82545)
   13102 		ifmr->ifm_active |= IFM_1000_LX;
   13103 	else
   13104 		ifmr->ifm_active |= IFM_1000_SX;
   13105 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13106 		ifmr->ifm_active |= IFM_FDX;
   13107 	else
   13108 		ifmr->ifm_active |= IFM_HDX;
   13109 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13110 	if (ctrl & CTRL_RFCE)
   13111 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13112 	if (ctrl & CTRL_TFCE)
   13113 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13114 }
   13115 
   13116 /* XXX TBI only */
   13117 static int
   13118 wm_check_for_link(struct wm_softc *sc)
   13119 {
   13120 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13121 	uint32_t rxcw;
   13122 	uint32_t ctrl;
   13123 	uint32_t status;
   13124 	bool signal;
   13125 
   13126 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13127 		device_xname(sc->sc_dev), __func__));
   13128 
   13129 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13130 		/* XXX need some work for >= 82571 */
   13131 		if (sc->sc_type >= WM_T_82571) {
   13132 			sc->sc_tbi_linkup = 1;
   13133 			return 0;
   13134 		}
   13135 	}
   13136 
   13137 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13138 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13139 	status = CSR_READ(sc, WMREG_STATUS);
   13140 	signal = wm_tbi_havesignal(sc, ctrl);
   13141 
   13142 	DPRINTF(sc, WM_DEBUG_LINK,
   13143 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13144 		device_xname(sc->sc_dev), __func__, signal,
   13145 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13146 
   13147 	/*
   13148 	 * SWDPIN   LU RXCW
   13149 	 *	0    0	  0
   13150 	 *	0    0	  1	(should not happen)
   13151 	 *	0    1	  0	(should not happen)
   13152 	 *	0    1	  1	(should not happen)
   13153 	 *	1    0	  0	Disable autonego and force linkup
   13154 	 *	1    0	  1	got /C/ but not linkup yet
   13155 	 *	1    1	  0	(linkup)
   13156 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13157 	 *
   13158 	 */
   13159 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13160 		DPRINTF(sc, WM_DEBUG_LINK,
   13161 		    ("%s: %s: force linkup and fullduplex\n",
   13162 			device_xname(sc->sc_dev), __func__));
   13163 		sc->sc_tbi_linkup = 0;
   13164 		/* Disable auto-negotiation in the TXCW register */
   13165 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13166 
   13167 		/*
   13168 		 * Force link-up and also force full-duplex.
   13169 		 *
    13170 		 * NOTE: The hardware may have updated TFCE and RFCE in
    13171 		 * CTRL automatically, so update sc->sc_ctrl from it.
   13172 		 */
   13173 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13174 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13175 	} else if (((status & STATUS_LU) != 0)
   13176 	    && ((rxcw & RXCW_C) != 0)
   13177 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13178 		sc->sc_tbi_linkup = 1;
   13179 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13180 			device_xname(sc->sc_dev), __func__));
   13181 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13182 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13183 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    13184 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   13185 			device_xname(sc->sc_dev), __func__));
   13186 	} else {
   13187 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13188 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13189 			status));
   13190 	}
   13191 
   13192 	return 0;
   13193 }
   13194 
   13195 /*
   13196  * wm_tbi_tick:
   13197  *
   13198  *	Check the link on TBI devices.
   13199  *	This function acts as mii_tick().
   13200  */
   13201 static void
   13202 wm_tbi_tick(struct wm_softc *sc)
   13203 {
   13204 	struct mii_data *mii = &sc->sc_mii;
   13205 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13206 	uint32_t status;
   13207 
   13208 	KASSERT(WM_CORE_LOCKED(sc));
   13209 
   13210 	status = CSR_READ(sc, WMREG_STATUS);
   13211 
   13212 	/* XXX is this needed? */
   13213 	(void)CSR_READ(sc, WMREG_RXCW);
   13214 	(void)CSR_READ(sc, WMREG_CTRL);
   13215 
    13216 	/* Set link status */
   13217 	if ((status & STATUS_LU) == 0) {
   13218 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13219 			device_xname(sc->sc_dev)));
   13220 		sc->sc_tbi_linkup = 0;
   13221 	} else if (sc->sc_tbi_linkup == 0) {
   13222 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13223 			device_xname(sc->sc_dev),
   13224 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13225 		sc->sc_tbi_linkup = 1;
   13226 		sc->sc_tbi_serdes_ticks = 0;
   13227 	}
   13228 
   13229 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13230 		goto setled;
   13231 
   13232 	if ((status & STATUS_LU) == 0) {
   13233 		sc->sc_tbi_linkup = 0;
   13234 		/* If the timer expired, retry autonegotiation */
   13235 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13236 		    && (++sc->sc_tbi_serdes_ticks
   13237 			>= sc->sc_tbi_serdes_anegticks)) {
   13238 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13239 				device_xname(sc->sc_dev), __func__));
   13240 			sc->sc_tbi_serdes_ticks = 0;
   13241 			/*
   13242 			 * Reset the link, and let autonegotiation do
   13243 			 * its thing
   13244 			 */
   13245 			sc->sc_ctrl |= CTRL_LRST;
   13246 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13247 			CSR_WRITE_FLUSH(sc);
   13248 			delay(1000);
   13249 			sc->sc_ctrl &= ~CTRL_LRST;
   13250 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13251 			CSR_WRITE_FLUSH(sc);
   13252 			delay(1000);
   13253 			CSR_WRITE(sc, WMREG_TXCW,
   13254 			    sc->sc_txcw & ~TXCW_ANE);
   13255 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13256 		}
   13257 	}
   13258 
   13259 setled:
   13260 	wm_tbi_serdes_set_linkled(sc);
   13261 }
   13262 
   13263 /* SERDES related */
   13264 static void
   13265 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13266 {
   13267 	uint32_t reg;
   13268 
   13269 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13270 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13271 		return;
   13272 
   13273 	/* Enable PCS to turn on link */
   13274 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13275 	reg |= PCS_CFG_PCS_EN;
   13276 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13277 
   13278 	/* Power up the laser */
   13279 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13280 	reg &= ~CTRL_EXT_SWDPIN(3);
   13281 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13282 
   13283 	/* Flush the write to verify completion */
   13284 	CSR_WRITE_FLUSH(sc);
   13285 	delay(1000);
   13286 }
   13287 
   13288 static int
   13289 wm_serdes_mediachange(struct ifnet *ifp)
   13290 {
   13291 	struct wm_softc *sc = ifp->if_softc;
   13292 	bool pcs_autoneg = true; /* XXX */
   13293 	uint32_t ctrl_ext, pcs_lctl, reg;
   13294 
   13295 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13296 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13297 		return 0;
   13298 
   13299 	/* XXX Currently, this function is not called on 8257[12] */
   13300 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13301 	    || (sc->sc_type >= WM_T_82575))
   13302 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13303 
   13304 	/* Power on the sfp cage if present */
   13305 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13306 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13307 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13308 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13309 
   13310 	sc->sc_ctrl |= CTRL_SLU;
   13311 
   13312 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13313 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13314 
   13315 		reg = CSR_READ(sc, WMREG_CONNSW);
   13316 		reg |= CONNSW_ENRGSRC;
   13317 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13318 	}
   13319 
   13320 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13321 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13322 	case CTRL_EXT_LINK_MODE_SGMII:
   13323 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13324 		pcs_autoneg = true;
   13325 		/* Autoneg time out should be disabled for SGMII mode */
   13326 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13327 		break;
   13328 	case CTRL_EXT_LINK_MODE_1000KX:
   13329 		pcs_autoneg = false;
   13330 		/* FALLTHROUGH */
   13331 	default:
   13332 		if ((sc->sc_type == WM_T_82575)
   13333 		    || (sc->sc_type == WM_T_82576)) {
   13334 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13335 				pcs_autoneg = false;
   13336 		}
   13337 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13338 		    | CTRL_FRCFDX;
   13339 
   13340 		/* Set speed of 1000/Full if speed/duplex is forced */
   13341 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13342 	}
   13343 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13344 
   13345 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13346 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13347 
   13348 	if (pcs_autoneg) {
   13349 		/* Set PCS register for autoneg */
   13350 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13351 
   13352 		/* Disable force flow control for autoneg */
   13353 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13354 
   13355 		/* Configure flow control advertisement for autoneg */
   13356 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13357 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13358 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13359 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13360 	} else
   13361 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13362 
   13363 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13364 
   13365 	return 0;
   13366 }
   13367 
   13368 static void
   13369 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13370 {
   13371 	struct wm_softc *sc = ifp->if_softc;
   13372 	struct mii_data *mii = &sc->sc_mii;
   13373 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13374 	uint32_t pcs_adv, pcs_lpab, reg;
   13375 
   13376 	ifmr->ifm_status = IFM_AVALID;
   13377 	ifmr->ifm_active = IFM_ETHER;
   13378 
   13379 	/* Check PCS */
   13380 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13381 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13382 		ifmr->ifm_active |= IFM_NONE;
   13383 		sc->sc_tbi_linkup = 0;
   13384 		goto setled;
   13385 	}
   13386 
   13387 	sc->sc_tbi_linkup = 1;
   13388 	ifmr->ifm_status |= IFM_ACTIVE;
   13389 	if (sc->sc_type == WM_T_I354) {
   13390 		uint32_t status;
   13391 
   13392 		status = CSR_READ(sc, WMREG_STATUS);
   13393 		if (((status & STATUS_2P5_SKU) != 0)
   13394 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13395 			ifmr->ifm_active |= IFM_2500_KX;
   13396 		} else
   13397 			ifmr->ifm_active |= IFM_1000_KX;
   13398 	} else {
   13399 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13400 		case PCS_LSTS_SPEED_10:
   13401 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13402 			break;
   13403 		case PCS_LSTS_SPEED_100:
   13404 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13405 			break;
   13406 		case PCS_LSTS_SPEED_1000:
   13407 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13408 			break;
   13409 		default:
   13410 			device_printf(sc->sc_dev, "Unknown speed\n");
   13411 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13412 			break;
   13413 		}
   13414 	}
   13415 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13416 	if ((reg & PCS_LSTS_FDX) != 0)
   13417 		ifmr->ifm_active |= IFM_FDX;
   13418 	else
   13419 		ifmr->ifm_active |= IFM_HDX;
   13420 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13421 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13422 		/* Check flow */
   13423 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13424 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13425 			DPRINTF(sc, WM_DEBUG_LINK,
   13426 			    ("XXX LINKOK but not ACOMP\n"));
   13427 			goto setled;
   13428 		}
   13429 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13430 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13431 		DPRINTF(sc, WM_DEBUG_LINK,
   13432 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13433 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13434 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13435 			mii->mii_media_active |= IFM_FLOW
   13436 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13437 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13438 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13439 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13440 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13441 			mii->mii_media_active |= IFM_FLOW
   13442 			    | IFM_ETH_TXPAUSE;
   13443 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13444 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13445 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13446 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13447 			mii->mii_media_active |= IFM_FLOW
   13448 			    | IFM_ETH_RXPAUSE;
   13449 		}
   13450 	}
   13451 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13452 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13453 setled:
   13454 	wm_tbi_serdes_set_linkled(sc);
   13455 }
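
/*
 * Illustrative sketch (not driver code) of the pause resolution applied
 * above, following IEEE 802.3 Annex 28B: from our advertised
 * (PCS_ANADV) and the link partner's (PCS_LPAB) Sym/Asym pause bits,
 * decide which directions of flow control to enable.
 * ex_resolve_pause() is an illustration only.
 */
#if 0
static void
ex_resolve_pause(uint32_t adv, uint32_t lpa, bool *txpause, bool *rxpause)
{

	*txpause = *rxpause = false;
	if ((adv & TXCW_SYM_PAUSE) && (lpa & TXCW_SYM_PAUSE)) {
		/* Both ends support symmetric pause. */
		*txpause = *rxpause = true;
	} else if (((adv & TXCW_SYM_PAUSE) == 0) && (adv & TXCW_ASYM_PAUSE)
	    && (lpa & TXCW_SYM_PAUSE) && (lpa & TXCW_ASYM_PAUSE)) {
		/* We may send pause frames but won't honor them. */
		*txpause = true;
	} else if ((adv & TXCW_SYM_PAUSE) && (adv & TXCW_ASYM_PAUSE)
	    && ((lpa & TXCW_SYM_PAUSE) == 0) && (lpa & TXCW_ASYM_PAUSE)) {
		/* The partner only sends pause frames; we honor them. */
		*rxpause = true;
	}
}
#endif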
   13456 
   13457 /*
   13458  * wm_serdes_tick:
   13459  *
   13460  *	Check the link on serdes devices.
   13461  */
   13462 static void
   13463 wm_serdes_tick(struct wm_softc *sc)
   13464 {
   13465 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13466 	struct mii_data *mii = &sc->sc_mii;
   13467 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13468 	uint32_t reg;
   13469 
   13470 	KASSERT(WM_CORE_LOCKED(sc));
   13471 
   13472 	mii->mii_media_status = IFM_AVALID;
   13473 	mii->mii_media_active = IFM_ETHER;
   13474 
   13475 	/* Check PCS */
   13476 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13477 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13478 		mii->mii_media_status |= IFM_ACTIVE;
   13479 		sc->sc_tbi_linkup = 1;
   13480 		sc->sc_tbi_serdes_ticks = 0;
   13481 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13482 		if ((reg & PCS_LSTS_FDX) != 0)
   13483 			mii->mii_media_active |= IFM_FDX;
   13484 		else
   13485 			mii->mii_media_active |= IFM_HDX;
   13486 	} else {
   13487 		mii->mii_media_status |= IFM_NONE;
   13488 		sc->sc_tbi_linkup = 0;
   13489 		/* If the timer expired, retry autonegotiation */
   13490 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13491 		    && (++sc->sc_tbi_serdes_ticks
   13492 			>= sc->sc_tbi_serdes_anegticks)) {
   13493 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13494 				device_xname(sc->sc_dev), __func__));
   13495 			sc->sc_tbi_serdes_ticks = 0;
   13496 			/* XXX */
   13497 			wm_serdes_mediachange(ifp);
   13498 		}
   13499 	}
   13500 
   13501 	wm_tbi_serdes_set_linkled(sc);
   13502 }
   13503 
   13504 /* SFP related */
   13505 
   13506 static int
   13507 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13508 {
   13509 	uint32_t i2ccmd;
   13510 	int i;
   13511 
   13512 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13513 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13514 
   13515 	/* Poll the ready bit */
   13516 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13517 		delay(50);
   13518 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13519 		if (i2ccmd & I2CCMD_READY)
   13520 			break;
   13521 	}
   13522 	if ((i2ccmd & I2CCMD_READY) == 0)
   13523 		return -1;
   13524 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13525 		return -1;
   13526 
   13527 	*data = i2ccmd & 0x00ff;
   13528 
   13529 	return 0;
   13530 }
   13531 
   13532 static uint32_t
   13533 wm_sfp_get_media_type(struct wm_softc *sc)
   13534 {
   13535 	uint32_t ctrl_ext;
   13536 	uint8_t val = 0;
   13537 	int timeout = 3;
   13538 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13539 	int rv = -1;
   13540 
   13541 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13542 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13543 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13544 	CSR_WRITE_FLUSH(sc);
   13545 
   13546 	/* Read SFP module data */
   13547 	while (timeout) {
   13548 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13549 		if (rv == 0)
   13550 			break;
   13551 		delay(100*1000); /* XXX too big */
   13552 		timeout--;
   13553 	}
   13554 	if (rv != 0)
   13555 		goto out;
   13556 
   13557 	switch (val) {
   13558 	case SFF_SFP_ID_SFF:
   13559 		aprint_normal_dev(sc->sc_dev,
   13560 		    "Module/Connector soldered to board\n");
   13561 		break;
   13562 	case SFF_SFP_ID_SFP:
   13563 		sc->sc_flags |= WM_F_SFP;
   13564 		break;
   13565 	case SFF_SFP_ID_UNKNOWN:
   13566 		goto out;
   13567 	default:
   13568 		break;
   13569 	}
   13570 
   13571 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13572 	if (rv != 0)
   13573 		goto out;
   13574 
   13575 	sc->sc_sfptype = val;
   13576 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13577 		mediatype = WM_MEDIATYPE_SERDES;
   13578 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13579 		sc->sc_flags |= WM_F_SGMII;
   13580 		mediatype = WM_MEDIATYPE_COPPER;
   13581 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13582 		sc->sc_flags |= WM_F_SGMII;
   13583 		mediatype = WM_MEDIATYPE_SERDES;
   13584 	} else {
   13585 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13586 		    __func__, sc->sc_sfptype);
   13587 		sc->sc_sfptype = 0; /* XXX unknown */
   13588 	}
   13589 
   13590 out:
   13591 	/* Restore I2C interface setting */
   13592 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13593 
   13594 	return mediatype;
   13595 }
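
/*
 * Illustrative summary (not driver code) of the media-type decision
 * above, driven by the module's Ethernet compliance flags (byte 6 of
 * the SFP EEPROM per SFF-8472): SX/LX optics run the MAC in SERDES
 * (fiber) mode, a 1000BASE-T module carries its own PHY and is driven
 * over SGMII as copper, and 100BASE-FX is also reached through SGMII
 * but reported as SERDES media.  ex_sfp_mediatype() is an illustration
 * only.
 */
#if 0
static uint32_t
ex_sfp_mediatype(uint8_t flags, bool *sgmii)
{

	*sgmii = false;
	if (flags & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX))
		return WM_MEDIATYPE_SERDES;	/* 1000BASE-X optics */
	if (flags & SFF_SFP_ETH_FLAGS_1000T) {
		*sgmii = true;			/* Copper module with PHY */
		return WM_MEDIATYPE_COPPER;
	}
	if (flags & SFF_SFP_ETH_FLAGS_100FX) {
		*sgmii = true;			/* 100Mb fiber via SGMII */
		return WM_MEDIATYPE_SERDES;
	}
	return WM_MEDIATYPE_UNKNOWN;
}
#endif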
   13596 
   13597 /*
   13598  * NVM related.
   13599  * Microwire, SPI (w/wo EERD) and Flash.
   13600  */
   13601 
   13602 /* Both spi and uwire */
   13603 
   13604 /*
   13605  * wm_eeprom_sendbits:
   13606  *
   13607  *	Send a series of bits to the EEPROM.
   13608  */
   13609 static void
   13610 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13611 {
   13612 	uint32_t reg;
   13613 	int x;
   13614 
   13615 	reg = CSR_READ(sc, WMREG_EECD);
   13616 
   13617 	for (x = nbits; x > 0; x--) {
   13618 		if (bits & (1U << (x - 1)))
   13619 			reg |= EECD_DI;
   13620 		else
   13621 			reg &= ~EECD_DI;
   13622 		CSR_WRITE(sc, WMREG_EECD, reg);
   13623 		CSR_WRITE_FLUSH(sc);
   13624 		delay(2);
   13625 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13626 		CSR_WRITE_FLUSH(sc);
   13627 		delay(2);
   13628 		CSR_WRITE(sc, WMREG_EECD, reg);
   13629 		CSR_WRITE_FLUSH(sc);
   13630 		delay(2);
   13631 	}
   13632 }
   13633 
   13634 /*
   13635  * wm_eeprom_recvbits:
   13636  *
   13637  *	Receive a series of bits from the EEPROM.
   13638  */
   13639 static void
   13640 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13641 {
   13642 	uint32_t reg, val;
   13643 	int x;
   13644 
   13645 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13646 
   13647 	val = 0;
   13648 	for (x = nbits; x > 0; x--) {
   13649 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13650 		CSR_WRITE_FLUSH(sc);
   13651 		delay(2);
   13652 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13653 			val |= (1U << (x - 1));
   13654 		CSR_WRITE(sc, WMREG_EECD, reg);
   13655 		CSR_WRITE_FLUSH(sc);
   13656 		delay(2);
   13657 	}
   13658 	*valp = val;
   13659 }
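
/*
 * Illustrative sketch (not driver code) of how the two bit-bang helpers
 * above combine into one full Microwire READ transaction, as
 * wm_nvm_read_uwire() below does for each word: with chip select
 * raised, clock out the 3-bit READ opcode and the word address
 * MSB-first on DI, then clock 16 data bits back in from DO.
 * ex_uwire_read_word() is an illustration only.
 */
#if 0
static uint16_t
ex_uwire_read_word(struct wm_softc *sc, int word)
{
	uint32_t val;

	/* The caller has already raised CHIP SELECT (EECD_CS). */
	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	   /* Opcode */
	wm_eeprom_sendbits(sc, word, sc->sc_nvm_addrbits); /* Address */
	wm_eeprom_recvbits(sc, &val, 16);		   /* Data */
	return val & 0xffff;
}
#endif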
   13660 
   13661 /* Microwire */
   13662 
   13663 /*
   13664  * wm_nvm_read_uwire:
   13665  *
   13666  *	Read a word from the EEPROM using the MicroWire protocol.
   13667  */
   13668 static int
   13669 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13670 {
   13671 	uint32_t reg, val;
   13672 	int i, rv;
   13673 
   13674 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13675 		device_xname(sc->sc_dev), __func__));
   13676 
   13677 	rv = sc->nvm.acquire(sc);
   13678 	if (rv != 0)
   13679 		return rv;
   13680 
   13681 	for (i = 0; i < wordcnt; i++) {
   13682 		/* Clear SK and DI. */
   13683 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13684 		CSR_WRITE(sc, WMREG_EECD, reg);
   13685 
   13686 		/*
   13687 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13688 		 * and Xen.
   13689 		 *
    13690 		 * We use this workaround only for the 82540 because
    13691 		 * qemu's e1000 emulates an 82540.
   13692 		 */
   13693 		if (sc->sc_type == WM_T_82540) {
   13694 			reg |= EECD_SK;
   13695 			CSR_WRITE(sc, WMREG_EECD, reg);
   13696 			reg &= ~EECD_SK;
   13697 			CSR_WRITE(sc, WMREG_EECD, reg);
   13698 			CSR_WRITE_FLUSH(sc);
   13699 			delay(2);
   13700 		}
   13701 		/* XXX: end of workaround */
   13702 
   13703 		/* Set CHIP SELECT. */
   13704 		reg |= EECD_CS;
   13705 		CSR_WRITE(sc, WMREG_EECD, reg);
   13706 		CSR_WRITE_FLUSH(sc);
   13707 		delay(2);
   13708 
   13709 		/* Shift in the READ command. */
   13710 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13711 
   13712 		/* Shift in address. */
   13713 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13714 
   13715 		/* Shift out the data. */
   13716 		wm_eeprom_recvbits(sc, &val, 16);
   13717 		data[i] = val & 0xffff;
   13718 
   13719 		/* Clear CHIP SELECT. */
   13720 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13721 		CSR_WRITE(sc, WMREG_EECD, reg);
   13722 		CSR_WRITE_FLUSH(sc);
   13723 		delay(2);
   13724 	}
   13725 
   13726 	sc->nvm.release(sc);
   13727 	return 0;
   13728 }
   13729 
   13730 /* SPI */
   13731 
   13732 /*
   13733  * Set SPI and FLASH related information from the EECD register.
   13734  * For 82541 and 82547, the word size is taken from EEPROM.
   13735  */
   13736 static int
   13737 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13738 {
   13739 	int size;
   13740 	uint32_t reg;
   13741 	uint16_t data;
   13742 
   13743 	reg = CSR_READ(sc, WMREG_EECD);
   13744 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13745 
   13746 	/* Read the size of NVM from EECD by default */
   13747 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13748 	switch (sc->sc_type) {
   13749 	case WM_T_82541:
   13750 	case WM_T_82541_2:
   13751 	case WM_T_82547:
   13752 	case WM_T_82547_2:
   13753 		/* Set dummy value to access EEPROM */
   13754 		sc->sc_nvm_wordsize = 64;
   13755 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13756 			aprint_error_dev(sc->sc_dev,
   13757 			    "%s: failed to read EEPROM size\n", __func__);
   13758 		}
   13759 		reg = data;
   13760 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13761 		if (size == 0)
    13762 			size = 6; /* 1 << 6 = 64 words */
   13763 		else
   13764 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13765 		break;
   13766 	case WM_T_80003:
   13767 	case WM_T_82571:
   13768 	case WM_T_82572:
   13769 	case WM_T_82573: /* SPI case */
   13770 	case WM_T_82574: /* SPI case */
   13771 	case WM_T_82583: /* SPI case */
   13772 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13773 		if (size > 14)
   13774 			size = 14;
   13775 		break;
   13776 	case WM_T_82575:
   13777 	case WM_T_82576:
   13778 	case WM_T_82580:
   13779 	case WM_T_I350:
   13780 	case WM_T_I354:
   13781 	case WM_T_I210:
   13782 	case WM_T_I211:
   13783 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13784 		if (size > 15)
   13785 			size = 15;
   13786 		break;
   13787 	default:
   13788 		aprint_error_dev(sc->sc_dev,
   13789 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13790 		return -1;
   13791 		break;
   13792 	}
   13793 
   13794 	sc->sc_nvm_wordsize = 1 << size;
   13795 
   13796 	return 0;
   13797 }
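
/*
 * Worked example (not driver code) of the size computation above: the
 * size field is an exponent offset, so with the usual
 * NVM_WORD_SIZE_BASE_SHIFT of 6 (an assumption here; the driver's
 * definition is authoritative) a field value of 2 gives
 * size = 2 + 6 = 8, i.e. sc_nvm_wordsize = 1 << 8 = 256 words.
 */
#if 0
static inline int
ex_nvm_wordsize(int size_field)
{

	/* 1 << (field + base shift); the base shift of 6 is assumed. */
	return 1 << (size_field + 6);
}
#endif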
   13798 
   13799 /*
   13800  * wm_nvm_ready_spi:
   13801  *
   13802  *	Wait for a SPI EEPROM to be ready for commands.
   13803  */
   13804 static int
   13805 wm_nvm_ready_spi(struct wm_softc *sc)
   13806 {
   13807 	uint32_t val;
   13808 	int usec;
   13809 
   13810 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13811 		device_xname(sc->sc_dev), __func__));
   13812 
   13813 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13814 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13815 		wm_eeprom_recvbits(sc, &val, 8);
   13816 		if ((val & SPI_SR_RDY) == 0)
   13817 			break;
   13818 	}
   13819 	if (usec >= SPI_MAX_RETRIES) {
    13820 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   13821 		return -1;
   13822 	}
   13823 	return 0;
   13824 }
   13825 
   13826 /*
   13827  * wm_nvm_read_spi:
   13828  *
    13829  *	Read a word from the EEPROM using the SPI protocol.
   13830  */
   13831 static int
   13832 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13833 {
   13834 	uint32_t reg, val;
   13835 	int i;
   13836 	uint8_t opc;
   13837 	int rv;
   13838 
   13839 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13840 		device_xname(sc->sc_dev), __func__));
   13841 
   13842 	rv = sc->nvm.acquire(sc);
   13843 	if (rv != 0)
   13844 		return rv;
   13845 
   13846 	/* Clear SK and CS. */
   13847 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13848 	CSR_WRITE(sc, WMREG_EECD, reg);
   13849 	CSR_WRITE_FLUSH(sc);
   13850 	delay(2);
   13851 
   13852 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13853 		goto out;
   13854 
   13855 	/* Toggle CS to flush commands. */
   13856 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13857 	CSR_WRITE_FLUSH(sc);
   13858 	delay(2);
   13859 	CSR_WRITE(sc, WMREG_EECD, reg);
   13860 	CSR_WRITE_FLUSH(sc);
   13861 	delay(2);
   13862 
   13863 	opc = SPI_OPC_READ;
   13864 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13865 		opc |= SPI_OPC_A8;
   13866 
   13867 	wm_eeprom_sendbits(sc, opc, 8);
   13868 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13869 
   13870 	for (i = 0; i < wordcnt; i++) {
   13871 		wm_eeprom_recvbits(sc, &val, 16);
   13872 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13873 	}
   13874 
   13875 	/* Raise CS and clear SK. */
   13876 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13877 	CSR_WRITE(sc, WMREG_EECD, reg);
   13878 	CSR_WRITE_FLUSH(sc);
   13879 	delay(2);
   13880 
   13881 out:
   13882 	sc->nvm.release(sc);
   13883 	return rv;
   13884 }
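
/*
 * Illustrative note (not driver code) on the SPI command construction
 * above: parts with 8 address bits fold the ninth address bit into the
 * opcode (the SPI_OPC_A8 variant of READ), and the word address becomes
 * a byte address by shifting left once before it is clocked out.
 * ex_spi_read_opc() is an illustration only.
 */
#if 0
static inline uint8_t
ex_spi_read_opc(int addrbits, int word)
{
	uint8_t opc = SPI_OPC_READ;

	/* Words >= 128 lie beyond 8 address bits: use the A8 opcode. */
	if (addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;
	return opc;
}
#endif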
   13885 
   13886 /* Using with EERD */
   13887 
   13888 static int
   13889 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13890 {
   13891 	uint32_t attempts = 100000;
   13892 	uint32_t i, reg = 0;
   13893 	int32_t done = -1;
   13894 
   13895 	for (i = 0; i < attempts; i++) {
   13896 		reg = CSR_READ(sc, rw);
   13897 
   13898 		if (reg & EERD_DONE) {
   13899 			done = 0;
   13900 			break;
   13901 		}
   13902 		delay(5);
   13903 	}
   13904 
   13905 	return done;
   13906 }
   13907 
   13908 static int
   13909 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13910 {
   13911 	int i, eerd = 0;
   13912 	int rv;
   13913 
   13914 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13915 		device_xname(sc->sc_dev), __func__));
   13916 
   13917 	rv = sc->nvm.acquire(sc);
   13918 	if (rv != 0)
   13919 		return rv;
   13920 
   13921 	for (i = 0; i < wordcnt; i++) {
   13922 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13923 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13924 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13925 		if (rv != 0) {
   13926 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13927 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13928 			break;
   13929 		}
   13930 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13931 	}
   13932 
   13933 	sc->nvm.release(sc);
   13934 	return rv;
   13935 }
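
/*
 * Illustrative sketch (not driver code) of one EERD transaction as
 * issued above: write the word address plus the start bit, poll until
 * the done bit comes up, then take the 16-bit result from the data
 * field in the upper half of the register.  ex_eerd_read_word() is an
 * illustration only.
 */
#if 0
static int
ex_eerd_read_word(struct wm_softc *sc, int word, uint16_t *data)
{

	CSR_WRITE(sc, WMREG_EERD,
	    ((uint32_t)word << EERD_ADDR_SHIFT) | EERD_START);
	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) != 0)
		return ETIMEDOUT;	/* EERD_DONE never came up */
	*data = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
	return 0;
}
#endif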
   13936 
   13937 /* Flash */
   13938 
   13939 static int
   13940 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13941 {
   13942 	uint32_t eecd;
   13943 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13944 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13945 	uint32_t nvm_dword = 0;
   13946 	uint8_t sig_byte = 0;
   13947 	int rv;
   13948 
   13949 	switch (sc->sc_type) {
   13950 	case WM_T_PCH_SPT:
   13951 	case WM_T_PCH_CNP:
   13952 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13953 		act_offset = ICH_NVM_SIG_WORD * 2;
   13954 
   13955 		/* Set bank to 0 in case flash read fails. */
   13956 		*bank = 0;
   13957 
   13958 		/* Check bank 0 */
   13959 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13960 		if (rv != 0)
   13961 			return rv;
   13962 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13963 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13964 			*bank = 0;
   13965 			return 0;
   13966 		}
   13967 
   13968 		/* Check bank 1 */
   13969 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   13970 		    &nvm_dword);
   13971 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13972 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13973 			*bank = 1;
   13974 			return 0;
   13975 		}
   13976 		aprint_error_dev(sc->sc_dev,
   13977 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13978 		return -1;
   13979 	case WM_T_ICH8:
   13980 	case WM_T_ICH9:
   13981 		eecd = CSR_READ(sc, WMREG_EECD);
   13982 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13983 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13984 			return 0;
   13985 		}
   13986 		/* FALLTHROUGH */
   13987 	default:
   13988 		/* Default to 0 */
   13989 		*bank = 0;
   13990 
   13991 		/* Check bank 0 */
   13992 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13993 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13994 			*bank = 0;
   13995 			return 0;
   13996 		}
   13997 
   13998 		/* Check bank 1 */
   13999 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   14000 		    &sig_byte);
   14001 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14002 			*bank = 1;
   14003 			return 0;
   14004 		}
   14005 	}
   14006 
   14007 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   14008 		device_xname(sc->sc_dev)));
   14009 	return -1;
   14010 }
   14011 
   14012 /******************************************************************************
   14013  * This function does initial flash setup so that a new read/write/erase cycle
   14014  * can be started.
   14015  *
   14016  * sc - The pointer to the hw structure
   14017  ****************************************************************************/
   14018 static int32_t
   14019 wm_ich8_cycle_init(struct wm_softc *sc)
   14020 {
   14021 	uint16_t hsfsts;
   14022 	int32_t error = 1;
   14023 	int32_t i     = 0;
   14024 
   14025 	if (sc->sc_type >= WM_T_PCH_SPT)
   14026 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14027 	else
   14028 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14029 
    14030 	/* Bail out if the Flash Descriptor Valid bit is not set */
   14031 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14032 		return error;
   14033 
   14034 	/* Clear FCERR in Hw status by writing 1 */
   14035 	/* Clear DAEL in Hw status by writing a 1 */
   14036 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14037 
   14038 	if (sc->sc_type >= WM_T_PCH_SPT)
   14039 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14040 	else
   14041 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14042 
    14043 	/*
    14044 	 * Either we should have a hardware SPI cycle-in-progress bit
    14045 	 * to check against before starting a new cycle, or the FDONE
    14046 	 * bit should be changed in the hardware so that it reads 1
    14047 	 * after a hardware reset, which could then be used to tell
    14048 	 * whether a cycle is in progress or has completed.  We should
    14049 	 * also have a software semaphore guarding FDONE or the
    14050 	 * cycle-in-progress bit, so that accesses by two threads are
    14051 	 * serialized and two threads can't start a cycle at once.
    14052 	 */
   14053 
   14054 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14055 		/*
   14056 		 * There is no cycle running at present, so we can start a
   14057 		 * cycle
   14058 		 */
   14059 
   14060 		/* Begin by setting Flash Cycle Done. */
   14061 		hsfsts |= HSFSTS_DONE;
   14062 		if (sc->sc_type >= WM_T_PCH_SPT)
   14063 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14064 			    hsfsts & 0xffffUL);
   14065 		else
   14066 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14067 		error = 0;
   14068 	} else {
   14069 		/*
   14070 		 * Otherwise poll for sometime so the current cycle has a
   14071 		 * chance to end before giving up.
   14072 		 */
   14073 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14074 			if (sc->sc_type >= WM_T_PCH_SPT)
   14075 				hsfsts = ICH8_FLASH_READ32(sc,
   14076 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14077 			else
   14078 				hsfsts = ICH8_FLASH_READ16(sc,
   14079 				    ICH_FLASH_HSFSTS);
   14080 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14081 				error = 0;
   14082 				break;
   14083 			}
   14084 			delay(1);
   14085 		}
   14086 		if (error == 0) {
   14087 			/*
    14088 			 * The previous cycle ended within the timeout, so
    14089 			 * now set the Flash Cycle Done bit.
   14090 			 */
   14091 			hsfsts |= HSFSTS_DONE;
   14092 			if (sc->sc_type >= WM_T_PCH_SPT)
   14093 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14094 				    hsfsts & 0xffffUL);
   14095 			else
   14096 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14097 				    hsfsts);
   14098 		}
   14099 	}
   14100 	return error;
   14101 }
   14102 
   14103 /******************************************************************************
   14104  * This function starts a flash cycle and waits for its completion
   14105  *
   14106  * sc - The pointer to the hw structure
   14107  ****************************************************************************/
   14108 static int32_t
   14109 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14110 {
   14111 	uint16_t hsflctl;
   14112 	uint16_t hsfsts;
   14113 	int32_t error = 1;
   14114 	uint32_t i = 0;
   14115 
   14116 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14117 	if (sc->sc_type >= WM_T_PCH_SPT)
   14118 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14119 	else
   14120 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14121 	hsflctl |= HSFCTL_GO;
   14122 	if (sc->sc_type >= WM_T_PCH_SPT)
   14123 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14124 		    (uint32_t)hsflctl << 16);
   14125 	else
   14126 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14127 
   14128 	/* Wait till FDONE bit is set to 1 */
   14129 	do {
   14130 		if (sc->sc_type >= WM_T_PCH_SPT)
   14131 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14132 			    & 0xffffUL;
   14133 		else
   14134 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14135 		if (hsfsts & HSFSTS_DONE)
   14136 			break;
   14137 		delay(1);
   14138 		i++;
   14139 	} while (i < timeout);
    14140 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   14141 		error = 0;
   14142 
   14143 	return error;
   14144 }
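
/*
 * Illustrative sketch (not driver code) of the PCH_SPT register packing
 * handled inline above and in wm_ich8_cycle_init() and
 * wm_read_ich8_data(): from SPT on, the flash registers are reached
 * through LAN memory space with 32-bit accesses only, with HSFSTS in
 * the low 16 bits of the dword and HSFCTL in the high 16.  The ex_*
 * helpers are illustrations only.
 */
#if 0
static inline uint16_t
ex_spt_read_hsfctl(struct wm_softc *sc)
{

	/* HSFCTL is the upper half of the combined dword. */
	return ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
}

static inline void
ex_spt_write_hsfctl(struct wm_softc *sc, uint16_t hsflctl)
{

	/* Write the upper half; the HSFSTS half is written as zero. */
	ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, (uint32_t)hsflctl << 16);
}
#endif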
   14145 
   14146 /******************************************************************************
   14147  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14148  *
   14149  * sc - The pointer to the hw structure
   14150  * index - The index of the byte or word to read.
    14151  * size - Size of data to read: 1=byte, 2=word, 4=dword
   14152  * data - Pointer to the word to store the value read.
   14153  *****************************************************************************/
   14154 static int32_t
   14155 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14156     uint32_t size, uint32_t *data)
   14157 {
   14158 	uint16_t hsfsts;
   14159 	uint16_t hsflctl;
   14160 	uint32_t flash_linear_address;
   14161 	uint32_t flash_data = 0;
   14162 	int32_t error = 1;
   14163 	int32_t count = 0;
   14164 
    14165 	if (size < 1 || size > 4 || data == NULL ||
   14166 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14167 		return error;
   14168 
   14169 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14170 	    sc->sc_ich8_flash_base;
   14171 
   14172 	do {
   14173 		delay(1);
   14174 		/* Steps */
   14175 		error = wm_ich8_cycle_init(sc);
   14176 		if (error)
   14177 			break;
   14178 
   14179 		if (sc->sc_type >= WM_T_PCH_SPT)
   14180 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14181 			    >> 16;
   14182 		else
   14183 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14184 		/* The byte count field holds size - 1 (0=1 byte, 3=4 bytes). */
   14185 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14186 		    & HSFCTL_BCOUNT_MASK;
   14187 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14188 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14189 			/*
    14190 			 * On SPT, this register is in LAN memory space, not
    14191 			 * flash, so only 32-bit access is supported.
   14192 			 */
   14193 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14194 			    (uint32_t)hsflctl << 16);
   14195 		} else
   14196 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14197 
   14198 		/*
   14199 		 * Write the last 24 bits of index into Flash Linear address
   14200 		 * field in Flash Address
   14201 		 */
   14202 		/* TODO: TBD maybe check the index against the size of flash */
   14203 
   14204 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14205 
   14206 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14207 
    14208 		/*
    14209 		 * If FCERR is set, clear it and retry the whole sequence
    14210 		 * a few more times; otherwise read the result from the
    14211 		 * Flash Data0 register, which returns the data least
    14212 		 * significant byte first.
    14213 		 */
   14214 		if (error == 0) {
   14215 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14216 			if (size == 1)
   14217 				*data = (uint8_t)(flash_data & 0x000000FF);
   14218 			else if (size == 2)
   14219 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14220 			else if (size == 4)
   14221 				*data = (uint32_t)flash_data;
   14222 			break;
   14223 		} else {
   14224 			/*
			 * If we've gotten here, things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try,
			 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14229 			 */
   14230 			if (sc->sc_type >= WM_T_PCH_SPT)
   14231 				hsfsts = ICH8_FLASH_READ32(sc,
   14232 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14233 			else
   14234 				hsfsts = ICH8_FLASH_READ16(sc,
   14235 				    ICH_FLASH_HSFSTS);
   14236 
   14237 			if (hsfsts & HSFSTS_ERR) {
   14238 				/* Repeat for some time before giving up. */
   14239 				continue;
   14240 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14241 				break;
   14242 		}
   14243 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14244 
   14245 	return error;
   14246 }
   14247 
   14248 /******************************************************************************
   14249  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14250  *
 * sc - pointer to the wm_softc structure
   14252  * index - The index of the byte to read.
   14253  * data - Pointer to a byte to store the value read.
   14254  *****************************************************************************/
   14255 static int32_t
   14256 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14257 {
   14258 	int32_t status;
   14259 	uint32_t word = 0;
   14260 
   14261 	status = wm_read_ich8_data(sc, index, 1, &word);
   14262 	if (status == 0)
   14263 		*data = (uint8_t)word;
   14264 	else
   14265 		*data = 0;
   14266 
   14267 	return status;
   14268 }
   14269 
   14270 /******************************************************************************
   14271  * Reads a word from the NVM using the ICH8 flash access registers.
   14272  *
 * sc - pointer to the wm_softc structure
   14274  * index - The starting byte index of the word to read.
   14275  * data - Pointer to a word to store the value read.
   14276  *****************************************************************************/
   14277 static int32_t
   14278 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14279 {
   14280 	int32_t status;
   14281 	uint32_t word = 0;
   14282 
   14283 	status = wm_read_ich8_data(sc, index, 2, &word);
   14284 	if (status == 0)
   14285 		*data = (uint16_t)word;
   14286 	else
   14287 		*data = 0;
   14288 
   14289 	return status;
   14290 }
   14291 
   14292 /******************************************************************************
   14293  * Reads a dword from the NVM using the ICH8 flash access registers.
   14294  *
 * sc - pointer to the wm_softc structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   14298  *****************************************************************************/
   14299 static int32_t
   14300 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14301 {
   14302 	int32_t status;
   14303 
   14304 	status = wm_read_ich8_data(sc, index, 4, data);
   14305 	return status;
   14306 }
   14307 
   14308 /******************************************************************************
   14309  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14310  * register.
   14311  *
   14312  * sc - Struct containing variables accessed by shared code
   14313  * offset - offset of word in the EEPROM to read
   14314  * data - word read from the EEPROM
   14315  * words - number of words to read
   14316  *****************************************************************************/
   14317 static int
   14318 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14319 {
   14320 	int rv;
   14321 	uint32_t flash_bank = 0;
   14322 	uint32_t act_offset = 0;
   14323 	uint32_t bank_offset = 0;
   14324 	uint16_t word = 0;
   14325 	uint16_t i = 0;
   14326 
   14327 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14328 		device_xname(sc->sc_dev), __func__));
   14329 
   14330 	rv = sc->nvm.acquire(sc);
   14331 	if (rv != 0)
   14332 		return rv;
   14333 
   14334 	/*
   14335 	 * We need to know which is the valid flash bank.  In the event
   14336 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14337 	 * managing flash_bank. So it cannot be trusted and needs
   14338 	 * to be updated with each read.
   14339 	 */
   14340 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14341 	if (rv) {
   14342 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14343 			device_xname(sc->sc_dev)));
   14344 		flash_bank = 0;
   14345 	}
   14346 
   14347 	/*
   14348 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14349 	 * size
   14350 	 */
   14351 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14352 
   14353 	for (i = 0; i < words; i++) {
   14354 		/* The NVM part needs a byte offset, hence * 2 */
   14355 		act_offset = bank_offset + ((offset + i) * 2);
   14356 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14357 		if (rv) {
   14358 			aprint_error_dev(sc->sc_dev,
   14359 			    "%s: failed to read NVM\n", __func__);
   14360 			break;
   14361 		}
   14362 		data[i] = word;
   14363 	}
   14364 
   14365 	sc->nvm.release(sc);
   14366 	return rv;
   14367 }
   14368 
   14369 /******************************************************************************
   14370  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14371  * register.
   14372  *
   14373  * sc - Struct containing variables accessed by shared code
   14374  * offset - offset of word in the EEPROM to read
   14375  * data - word read from the EEPROM
   14376  * words - number of words to read
   14377  *****************************************************************************/
   14378 static int
   14379 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14380 {
   14381 	int	 rv;
   14382 	uint32_t flash_bank = 0;
   14383 	uint32_t act_offset = 0;
   14384 	uint32_t bank_offset = 0;
   14385 	uint32_t dword = 0;
   14386 	uint16_t i = 0;
   14387 
   14388 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14389 		device_xname(sc->sc_dev), __func__));
   14390 
   14391 	rv = sc->nvm.acquire(sc);
   14392 	if (rv != 0)
   14393 		return rv;
   14394 
   14395 	/*
   14396 	 * We need to know which is the valid flash bank.  In the event
   14397 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14398 	 * managing flash_bank. So it cannot be trusted and needs
   14399 	 * to be updated with each read.
   14400 	 */
   14401 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14402 	if (rv) {
   14403 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14404 			device_xname(sc->sc_dev)));
   14405 		flash_bank = 0;
   14406 	}
   14407 
   14408 	/*
   14409 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14410 	 * size
   14411 	 */
   14412 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14413 
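	/*
	 * Worked example (bank 0): reading NVM word 5 gives act_offset
	 * 0x0a; the dword at 0x08 is read and bits 31:16 are returned.
	 */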
   14414 	for (i = 0; i < words; i++) {
   14415 		/* The NVM part needs a byte offset, hence * 2 */
   14416 		act_offset = bank_offset + ((offset + i) * 2);
   14417 		/* but we must read dword aligned, so mask ... */
   14418 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14419 		if (rv) {
   14420 			aprint_error_dev(sc->sc_dev,
   14421 			    "%s: failed to read NVM\n", __func__);
   14422 			break;
   14423 		}
   14424 		/* ... and pick out low or high word */
   14425 		if ((act_offset & 0x2) == 0)
   14426 			data[i] = (uint16_t)(dword & 0xFFFF);
   14427 		else
   14428 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14429 	}
   14430 
   14431 	sc->nvm.release(sc);
   14432 	return rv;
   14433 }
   14434 
   14435 /* iNVM */
   14436 
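/*
 * The I210 can operate without an external flash, and the I211 always
 * does; both can autoload from an integrated NVM (iNVM) instead.  The
 * iNVM is an array of INVM_SIZE dwords, each a record with a type field
 * and, for word-autoload records, a word address and 16 bits of data.
 * wm_nvm_read_word_invm() below scans those records.
 */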
   14437 static int
   14438 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14439 {
	int32_t	 rv = -1;	/* Default to "not found" */
   14441 	uint32_t invm_dword;
   14442 	uint16_t i;
   14443 	uint8_t record_type, word_address;
   14444 
   14445 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14446 		device_xname(sc->sc_dev), __func__));
   14447 
   14448 	for (i = 0; i < INVM_SIZE; i++) {
   14449 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14450 		/* Get record type */
   14451 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14452 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14453 			break;
   14454 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14455 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14456 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14457 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14458 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14459 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14460 			if (word_address == address) {
   14461 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14462 				rv = 0;
   14463 				break;
   14464 			}
   14465 		}
   14466 	}
   14467 
   14468 	return rv;
   14469 }
   14470 
   14471 static int
   14472 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14473 {
   14474 	int i, rv;
   14475 
   14476 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14477 		device_xname(sc->sc_dev), __func__));
   14478 
   14479 	rv = sc->nvm.acquire(sc);
   14480 	if (rv != 0)
   14481 		return rv;
   14482 
   14483 	for (i = 0; i < words; i++) {
   14484 		switch (offset + i) {
   14485 		case NVM_OFF_MACADDR:
   14486 		case NVM_OFF_MACADDR1:
   14487 		case NVM_OFF_MACADDR2:
   14488 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14489 			if (rv != 0) {
   14490 				data[i] = 0xffff;
   14491 				rv = -1;
   14492 			}
   14493 			break;
   14494 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14495 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14496 			if (rv != 0) {
   14497 				*data = INVM_DEFAULT_AL;
   14498 				rv = 0;
   14499 			}
   14500 			break;
   14501 		case NVM_OFF_CFG2:
   14502 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14503 			if (rv != 0) {
   14504 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14505 				rv = 0;
   14506 			}
   14507 			break;
   14508 		case NVM_OFF_CFG4:
   14509 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14510 			if (rv != 0) {
   14511 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14512 				rv = 0;
   14513 			}
   14514 			break;
   14515 		case NVM_OFF_LED_1_CFG:
   14516 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14517 			if (rv != 0) {
   14518 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14519 				rv = 0;
   14520 			}
   14521 			break;
   14522 		case NVM_OFF_LED_0_2_CFG:
   14523 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14524 			if (rv != 0) {
   14525 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14526 				rv = 0;
   14527 			}
   14528 			break;
   14529 		case NVM_OFF_ID_LED_SETTINGS:
   14530 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14531 			if (rv != 0) {
   14532 				*data = ID_LED_RESERVED_FFFF;
   14533 				rv = 0;
   14534 			}
   14535 			break;
   14536 		default:
   14537 			DPRINTF(sc, WM_DEBUG_NVM,
   14538 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14539 			*data = NVM_RESERVED_WORD;
   14540 			break;
   14541 		}
   14542 	}
   14543 
   14544 	sc->nvm.release(sc);
   14545 	return rv;
   14546 }
   14547 
/* Locking, NVM type detection, checksum validation, version check and read */
   14549 
   14550 static int
   14551 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14552 {
   14553 	uint32_t eecd = 0;
   14554 
   14555 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14556 	    || sc->sc_type == WM_T_82583) {
   14557 		eecd = CSR_READ(sc, WMREG_EECD);
   14558 
   14559 		/* Isolate bits 15 & 16 */
   14560 		eecd = ((eecd >> 15) & 0x03);
   14561 
   14562 		/* If both bits are set, device is Flash type */
   14563 		if (eecd == 0x03)
   14564 			return 0;
   14565 	}
   14566 	return 1;
   14567 }
   14568 
   14569 static int
   14570 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14571 {
   14572 	uint32_t eec;
   14573 
   14574 	eec = CSR_READ(sc, WMREG_EEC);
   14575 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14576 		return 1;
   14577 
   14578 	return 0;
   14579 }
   14580 
   14581 /*
   14582  * wm_nvm_validate_checksum
   14583  *
 * The checksum is defined so that the sum of the first 64 16-bit words
 * equals NVM_CHECKSUM.
   14585  */
   14586 static int
   14587 wm_nvm_validate_checksum(struct wm_softc *sc)
   14588 {
   14589 	uint16_t checksum;
   14590 	uint16_t eeprom_data;
   14591 #ifdef WM_DEBUG
   14592 	uint16_t csum_wordaddr, valid_checksum;
   14593 #endif
   14594 	int i;
   14595 
   14596 	checksum = 0;
   14597 
   14598 	/* Don't check for I211 */
   14599 	if (sc->sc_type == WM_T_I211)
   14600 		return 0;
   14601 
   14602 #ifdef WM_DEBUG
   14603 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14604 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14605 		csum_wordaddr = NVM_OFF_COMPAT;
   14606 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14607 	} else {
   14608 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14609 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14610 	}
   14611 
   14612 	/* Dump EEPROM image for debug */
   14613 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14614 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14615 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14616 		/* XXX PCH_SPT? */
   14617 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14618 		if ((eeprom_data & valid_checksum) == 0)
   14619 			DPRINTF(sc, WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   14621 				device_xname(sc->sc_dev), eeprom_data,
   14622 				    valid_checksum));
   14623 	}
   14624 
   14625 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14626 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14627 		for (i = 0; i < NVM_SIZE; i++) {
   14628 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14629 				printf("XXXX ");
   14630 			else
   14631 				printf("%04hx ", eeprom_data);
   14632 			if (i % 8 == 7)
   14633 				printf("\n");
   14634 		}
   14635 	}
   14636 
   14637 #endif /* WM_DEBUG */
   14638 
   14639 	for (i = 0; i < NVM_SIZE; i++) {
   14640 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14641 			return -1;
   14642 		checksum += eeprom_data;
   14643 	}
   14644 
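	/*
	 * Note that a mismatch is only reported (and only with WM_DEBUG);
	 * it is not treated as a fatal error here.
	 */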
   14645 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14646 #ifdef WM_DEBUG
   14647 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14648 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14649 #endif
   14650 	}
   14651 
   14652 	return 0;
   14653 }
   14654 
   14655 static void
   14656 wm_nvm_version_invm(struct wm_softc *sc)
   14657 {
   14658 	uint32_t dword;
   14659 
   14660 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect, though...
   14664 	 *
   14665 	 * Example:
   14666 	 *
   14667 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14668 	 */
   14669 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14670 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14671 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14672 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14673 }
   14674 
   14675 static void
   14676 wm_nvm_version(struct wm_softc *sc)
   14677 {
   14678 	uint16_t major, minor, build, patch;
   14679 	uint16_t uid0, uid1;
   14680 	uint16_t nvm_data;
   14681 	uint16_t off;
   14682 	bool check_version = false;
   14683 	bool check_optionrom = false;
   14684 	bool have_build = false;
   14685 	bool have_uid = true;
   14686 
   14687 	/*
   14688 	 * Version format:
   14689 	 *
   14690 	 * XYYZ
   14691 	 * X0YZ
   14692 	 * X0YY
   14693 	 *
   14694 	 * Example:
   14695 	 *
   14696 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14697 	 *	82571	0x50a6	5.10.6?
   14698 	 *	82572	0x506a	5.6.10?
   14699 	 *	82572EI	0x5069	5.6.9?
   14700 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14701 	 *		0x2013	2.1.3?
   14702 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14703 	 * ICH8+82567	0x0040	0.4.0?
   14704 	 * ICH9+82566	0x1040	1.4.0?
   14705 	 *ICH10+82567	0x0043	0.4.3?
   14706 	 *  PCH+82577	0x00c1	0.12.1?
   14707 	 * PCH2+82579	0x00d3	0.13.3?
   14708 	 *		0x00d4	0.13.4?
   14709 	 *  LPT+I218	0x0023	0.2.3?
   14710 	 *  SPT+I219	0x0084	0.8.4?
   14711 	 *  CNP+I219	0x0054	0.5.4?
   14712 	 */
   14713 
   14714 	/*
   14715 	 * XXX
   14716 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   14717 	 * I've never seen real 82574 hardware with such small SPI ROM.
   14718 	 */
   14719 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14720 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14721 		have_uid = false;
   14722 
   14723 	switch (sc->sc_type) {
   14724 	case WM_T_82571:
   14725 	case WM_T_82572:
   14726 	case WM_T_82574:
   14727 	case WM_T_82583:
   14728 		check_version = true;
   14729 		check_optionrom = true;
   14730 		have_build = true;
   14731 		break;
   14732 	case WM_T_ICH8:
   14733 	case WM_T_ICH9:
   14734 	case WM_T_ICH10:
   14735 	case WM_T_PCH:
   14736 	case WM_T_PCH2:
   14737 	case WM_T_PCH_LPT:
   14738 	case WM_T_PCH_SPT:
   14739 	case WM_T_PCH_CNP:
   14740 		check_version = true;
   14741 		have_build = true;
   14742 		have_uid = false;
   14743 		break;
   14744 	case WM_T_82575:
   14745 	case WM_T_82576:
   14746 	case WM_T_82580:
   14747 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14748 			check_version = true;
   14749 		break;
   14750 	case WM_T_I211:
   14751 		wm_nvm_version_invm(sc);
   14752 		have_uid = false;
   14753 		goto printver;
   14754 	case WM_T_I210:
   14755 		if (!wm_nvm_flash_presence_i210(sc)) {
   14756 			wm_nvm_version_invm(sc);
   14757 			have_uid = false;
   14758 			goto printver;
   14759 		}
   14760 		/* FALLTHROUGH */
   14761 	case WM_T_I350:
   14762 	case WM_T_I354:
   14763 		check_version = true;
   14764 		check_optionrom = true;
   14765 		break;
   14766 	default:
   14767 		return;
   14768 	}
   14769 	if (check_version
   14770 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14771 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14772 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14773 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14774 			build = nvm_data & NVM_BUILD_MASK;
   14775 			have_build = true;
   14776 		} else
   14777 			minor = nvm_data & 0x00ff;
   14778 
		/* The minor field is BCD-ish: e.g. 0x10 means decimal 10. */
   14780 		minor = (minor / 16) * 10 + (minor % 16);
   14781 		sc->sc_nvm_ver_major = major;
   14782 		sc->sc_nvm_ver_minor = minor;
   14783 
   14784 printver:
   14785 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14786 		    sc->sc_nvm_ver_minor);
   14787 		if (have_build) {
   14788 			sc->sc_nvm_ver_build = build;
   14789 			aprint_verbose(".%d", build);
   14790 		}
   14791 	}
   14792 
	/* Assume the Option ROM area is above NVM_SIZE */
   14794 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14795 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14796 		/* Option ROM Version */
   14797 		if ((off != 0x0000) && (off != 0xffff)) {
   14798 			int rv;
   14799 
   14800 			off += NVM_COMBO_VER_OFF;
   14801 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14802 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14803 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14804 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14805 				/* 16bits */
   14806 				major = uid0 >> 8;
   14807 				build = (uid0 << 8) | (uid1 >> 8);
   14808 				patch = uid1 & 0x00ff;
   14809 				aprint_verbose(", option ROM Version %d.%d.%d",
   14810 				    major, build, patch);
   14811 			}
   14812 		}
   14813 	}
   14814 
   14815 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14816 		aprint_verbose(", Image Unique ID %08x",
   14817 		    ((uint32_t)uid1 << 16) | uid0);
   14818 }
   14819 
   14820 /*
   14821  * wm_nvm_read:
   14822  *
   14823  *	Read data from the serial EEPROM.
   14824  */
   14825 static int
   14826 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14827 {
   14828 	int rv;
   14829 
   14830 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14831 		device_xname(sc->sc_dev), __func__));
   14832 
   14833 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14834 		return -1;
   14835 
   14836 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14837 
   14838 	return rv;
   14839 }
   14840 
   14841 /*
   14842  * Hardware semaphores.
 * Very complex...
   14844  */
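/*
 * Several mechanisms are layered below:
 *
 *  - EECD_EE_REQ/EE_GNT arbitration for direct EEPROM access
 *    (wm_get_eecd()/wm_put_eecd()),
 *  - the SWSM hardware semaphore bits SMBI and SWESMBI
 *    (wm_get_swsm_semaphore()),
 *  - per-resource software/firmware bits in SW_FW_SYNC, themselves
 *    protected by SWSM (wm_get_swfw_semaphore()),
 *  - EXTCNFCTR MDIO software ownership on ICH/PCH and 82573
 *    (wm_get_swfwhw_semaphore(), wm_get_swflag_ich8lan(),
 *    wm_get_hw_semaphore_82573()), and
 *  - the driver's own sc_ich_phymtx/sc_ich_nvmmtx mutexes.
 */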
   14845 
   14846 static int
   14847 wm_get_null(struct wm_softc *sc)
   14848 {
   14849 
   14850 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14851 		device_xname(sc->sc_dev), __func__));
   14852 	return 0;
   14853 }
   14854 
   14855 static void
   14856 wm_put_null(struct wm_softc *sc)
   14857 {
   14858 
   14859 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14860 		device_xname(sc->sc_dev), __func__));
   14861 	return;
   14862 }
   14863 
   14864 static int
   14865 wm_get_eecd(struct wm_softc *sc)
   14866 {
   14867 	uint32_t reg;
   14868 	int x;
   14869 
   14870 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14871 		device_xname(sc->sc_dev), __func__));
   14872 
   14873 	reg = CSR_READ(sc, WMREG_EECD);
   14874 
   14875 	/* Request EEPROM access. */
   14876 	reg |= EECD_EE_REQ;
   14877 	CSR_WRITE(sc, WMREG_EECD, reg);
   14878 
	/* ... and wait for it to be granted. */
   14880 	for (x = 0; x < 1000; x++) {
   14881 		reg = CSR_READ(sc, WMREG_EECD);
   14882 		if (reg & EECD_EE_GNT)
   14883 			break;
   14884 		delay(5);
   14885 	}
   14886 	if ((reg & EECD_EE_GNT) == 0) {
   14887 		aprint_error_dev(sc->sc_dev,
   14888 		    "could not acquire EEPROM GNT\n");
   14889 		reg &= ~EECD_EE_REQ;
   14890 		CSR_WRITE(sc, WMREG_EECD, reg);
   14891 		return -1;
   14892 	}
   14893 
   14894 	return 0;
   14895 }
   14896 
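/*
 * Bit-bang helpers for the EEPROM serial clock (EECD_SK).  SPI parts
 * need only about 1us per clock edge; Microwire parts need about 50us.
 */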
   14897 static void
   14898 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14899 {
   14900 
   14901 	*eecd |= EECD_SK;
   14902 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14903 	CSR_WRITE_FLUSH(sc);
   14904 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14905 		delay(1);
   14906 	else
   14907 		delay(50);
   14908 }
   14909 
   14910 static void
   14911 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14912 {
   14913 
   14914 	*eecd &= ~EECD_SK;
   14915 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14916 	CSR_WRITE_FLUSH(sc);
   14917 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14918 		delay(1);
   14919 	else
   14920 		delay(50);
   14921 }
   14922 
   14923 static void
   14924 wm_put_eecd(struct wm_softc *sc)
   14925 {
   14926 	uint32_t reg;
   14927 
   14928 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14929 		device_xname(sc->sc_dev), __func__));
   14930 
   14931 	/* Stop nvm */
   14932 	reg = CSR_READ(sc, WMREG_EECD);
   14933 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14934 		/* Pull CS high */
   14935 		reg |= EECD_CS;
   14936 		wm_nvm_eec_clock_lower(sc, &reg);
   14937 	} else {
   14938 		/* CS on Microwire is active-high */
   14939 		reg &= ~(EECD_CS | EECD_DI);
   14940 		CSR_WRITE(sc, WMREG_EECD, reg);
   14941 		wm_nvm_eec_clock_raise(sc, &reg);
   14942 		wm_nvm_eec_clock_lower(sc, &reg);
   14943 	}
   14944 
   14945 	reg = CSR_READ(sc, WMREG_EECD);
   14946 	reg &= ~EECD_EE_REQ;
   14947 	CSR_WRITE(sc, WMREG_EECD, reg);
   14948 
   14949 	return;
   14950 }
   14951 
   14952 /*
   14953  * Get hardware semaphore.
   14954  * Same as e1000_get_hw_semaphore_generic()
   14955  */
   14956 static int
   14957 wm_get_swsm_semaphore(struct wm_softc *sc)
   14958 {
   14959 	int32_t timeout;
   14960 	uint32_t swsm;
   14961 
   14962 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14963 		device_xname(sc->sc_dev), __func__));
   14964 	KASSERT(sc->sc_nvm_wordsize > 0);
   14965 
   14966 retry:
   14967 	/* Get the SW semaphore. */
   14968 	timeout = sc->sc_nvm_wordsize + 1;
   14969 	while (timeout) {
   14970 		swsm = CSR_READ(sc, WMREG_SWSM);
   14971 
   14972 		if ((swsm & SWSM_SMBI) == 0)
   14973 			break;
   14974 
   14975 		delay(50);
   14976 		timeout--;
   14977 	}
   14978 
   14979 	if (timeout == 0) {
   14980 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14981 			/*
   14982 			 * In rare circumstances, the SW semaphore may already
   14983 			 * be held unintentionally. Clear the semaphore once
   14984 			 * before giving up.
   14985 			 */
   14986 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14987 			wm_put_swsm_semaphore(sc);
   14988 			goto retry;
   14989 		}
   14990 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   14991 		return -1;
   14992 	}
   14993 
   14994 	/* Get the FW semaphore. */
   14995 	timeout = sc->sc_nvm_wordsize + 1;
   14996 	while (timeout) {
   14997 		swsm = CSR_READ(sc, WMREG_SWSM);
   14998 		swsm |= SWSM_SWESMBI;
   14999 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   15000 		/* If we managed to set the bit we got the semaphore. */
   15001 		swsm = CSR_READ(sc, WMREG_SWSM);
   15002 		if (swsm & SWSM_SWESMBI)
   15003 			break;
   15004 
   15005 		delay(50);
   15006 		timeout--;
   15007 	}
   15008 
   15009 	if (timeout == 0) {
   15010 		aprint_error_dev(sc->sc_dev,
   15011 		    "could not acquire SWSM SWESMBI\n");
   15012 		/* Release semaphores */
   15013 		wm_put_swsm_semaphore(sc);
   15014 		return -1;
   15015 	}
   15016 	return 0;
   15017 }
   15018 
   15019 /*
   15020  * Put hardware semaphore.
   15021  * Same as e1000_put_hw_semaphore_generic()
   15022  */
   15023 static void
   15024 wm_put_swsm_semaphore(struct wm_softc *sc)
   15025 {
   15026 	uint32_t swsm;
   15027 
   15028 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15029 		device_xname(sc->sc_dev), __func__));
   15030 
   15031 	swsm = CSR_READ(sc, WMREG_SWSM);
   15032 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15033 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15034 }
   15035 
   15036 /*
   15037  * Get SW/FW semaphore.
   15038  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15039  */
   15040 static int
   15041 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15042 {
   15043 	uint32_t swfw_sync;
   15044 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15045 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15046 	int timeout;
   15047 
   15048 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15049 		device_xname(sc->sc_dev), __func__));
   15050 
   15051 	if (sc->sc_type == WM_T_80003)
   15052 		timeout = 50;
   15053 	else
   15054 		timeout = 200;
   15055 
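	/*
	 * SW_FW_SYNC is itself protected by the SWSM hardware semaphore:
	 * take it, check that neither the software nor the firmware bit
	 * for this resource is set, set the software bit, and release
	 * the hardware semaphore again.
	 */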
   15056 	while (timeout) {
   15057 		if (wm_get_swsm_semaphore(sc)) {
   15058 			aprint_error_dev(sc->sc_dev,
   15059 			    "%s: failed to get semaphore\n",
   15060 			    __func__);
   15061 			return -1;
   15062 		}
   15063 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15064 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15065 			swfw_sync |= swmask;
   15066 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15067 			wm_put_swsm_semaphore(sc);
   15068 			return 0;
   15069 		}
   15070 		wm_put_swsm_semaphore(sc);
   15071 		delay(5000);
   15072 		timeout--;
   15073 	}
   15074 	device_printf(sc->sc_dev,
   15075 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15076 	    mask, swfw_sync);
   15077 	return -1;
   15078 }
   15079 
   15080 static void
   15081 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15082 {
   15083 	uint32_t swfw_sync;
   15084 
   15085 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15086 		device_xname(sc->sc_dev), __func__));
   15087 
   15088 	while (wm_get_swsm_semaphore(sc) != 0)
   15089 		continue;
   15090 
   15091 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15092 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15093 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15094 
   15095 	wm_put_swsm_semaphore(sc);
   15096 }
   15097 
   15098 static int
   15099 wm_get_nvm_80003(struct wm_softc *sc)
   15100 {
   15101 	int rv;
   15102 
   15103 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15104 		device_xname(sc->sc_dev), __func__));
   15105 
   15106 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15107 		aprint_error_dev(sc->sc_dev,
   15108 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15109 		return rv;
   15110 	}
   15111 
   15112 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15113 	    && (rv = wm_get_eecd(sc)) != 0) {
   15114 		aprint_error_dev(sc->sc_dev,
   15115 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15116 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15117 		return rv;
   15118 	}
   15119 
   15120 	return 0;
   15121 }
   15122 
   15123 static void
   15124 wm_put_nvm_80003(struct wm_softc *sc)
   15125 {
   15126 
   15127 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15128 		device_xname(sc->sc_dev), __func__));
   15129 
   15130 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15131 		wm_put_eecd(sc);
   15132 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15133 }
   15134 
   15135 static int
   15136 wm_get_nvm_82571(struct wm_softc *sc)
   15137 {
   15138 	int rv;
   15139 
   15140 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15141 		device_xname(sc->sc_dev), __func__));
   15142 
   15143 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15144 		return rv;
   15145 
   15146 	switch (sc->sc_type) {
   15147 	case WM_T_82573:
   15148 		break;
   15149 	default:
   15150 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15151 			rv = wm_get_eecd(sc);
   15152 		break;
   15153 	}
   15154 
   15155 	if (rv != 0) {
   15156 		aprint_error_dev(sc->sc_dev,
   15157 		    "%s: failed to get semaphore\n",
   15158 		    __func__);
   15159 		wm_put_swsm_semaphore(sc);
   15160 	}
   15161 
   15162 	return rv;
   15163 }
   15164 
   15165 static void
   15166 wm_put_nvm_82571(struct wm_softc *sc)
   15167 {
   15168 
   15169 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15170 		device_xname(sc->sc_dev), __func__));
   15171 
   15172 	switch (sc->sc_type) {
   15173 	case WM_T_82573:
   15174 		break;
   15175 	default:
   15176 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15177 			wm_put_eecd(sc);
   15178 		break;
   15179 	}
   15180 
   15181 	wm_put_swsm_semaphore(sc);
   15182 }
   15183 
   15184 static int
   15185 wm_get_phy_82575(struct wm_softc *sc)
   15186 {
   15187 
   15188 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15189 		device_xname(sc->sc_dev), __func__));
   15190 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15191 }
   15192 
   15193 static void
   15194 wm_put_phy_82575(struct wm_softc *sc)
   15195 {
   15196 
   15197 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15198 		device_xname(sc->sc_dev), __func__));
   15199 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15200 }
   15201 
   15202 static int
   15203 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15204 {
   15205 	uint32_t ext_ctrl;
	int timeout;
   15207 
   15208 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15209 		device_xname(sc->sc_dev), __func__));
   15210 
   15211 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15212 	for (timeout = 0; timeout < 200; timeout++) {
   15213 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15214 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15215 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15216 
   15217 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15218 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15219 			return 0;
   15220 		delay(5000);
   15221 	}
   15222 	device_printf(sc->sc_dev,
   15223 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15224 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15225 	return -1;
   15226 }
   15227 
   15228 static void
   15229 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15230 {
   15231 	uint32_t ext_ctrl;
   15232 
   15233 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15234 		device_xname(sc->sc_dev), __func__));
   15235 
   15236 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15237 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15238 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15239 
   15240 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15241 }
   15242 
   15243 static int
   15244 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15245 {
   15246 	uint32_t ext_ctrl;
   15247 	int timeout;
   15248 
   15249 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15250 		device_xname(sc->sc_dev), __func__));
   15251 	mutex_enter(sc->sc_ich_phymtx);
   15252 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15253 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15254 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15255 			break;
   15256 		delay(1000);
   15257 	}
   15258 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15259 		device_printf(sc->sc_dev,
   15260 		    "SW has already locked the resource\n");
   15261 		goto out;
   15262 	}
   15263 
   15264 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15265 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15266 	for (timeout = 0; timeout < 1000; timeout++) {
   15267 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15268 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15269 			break;
   15270 		delay(1000);
   15271 	}
   15272 	if (timeout >= 1000) {
   15273 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15274 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15275 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15276 		goto out;
   15277 	}
   15278 	return 0;
   15279 
   15280 out:
   15281 	mutex_exit(sc->sc_ich_phymtx);
   15282 	return -1;
   15283 }
   15284 
   15285 static void
   15286 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15287 {
   15288 	uint32_t ext_ctrl;
   15289 
   15290 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15291 		device_xname(sc->sc_dev), __func__));
   15292 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15293 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15294 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15295 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15296 	} else
   15297 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15298 
   15299 	mutex_exit(sc->sc_ich_phymtx);
   15300 }
   15301 
   15302 static int
   15303 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15304 {
   15305 
   15306 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15307 		device_xname(sc->sc_dev), __func__));
   15308 	mutex_enter(sc->sc_ich_nvmmtx);
   15309 
   15310 	return 0;
   15311 }
   15312 
   15313 static void
   15314 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15315 {
   15316 
   15317 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15318 		device_xname(sc->sc_dev), __func__));
   15319 	mutex_exit(sc->sc_ich_nvmmtx);
   15320 }
   15321 
   15322 static int
   15323 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15324 {
   15325 	int i = 0;
   15326 	uint32_t reg;
   15327 
   15328 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15329 		device_xname(sc->sc_dev), __func__));
   15330 
   15331 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15332 	do {
   15333 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15334 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15335 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15336 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15337 			break;
   15338 		delay(2*1000);
   15339 		i++;
   15340 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15341 
   15342 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15343 		wm_put_hw_semaphore_82573(sc);
   15344 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15345 		    device_xname(sc->sc_dev));
   15346 		return -1;
   15347 	}
   15348 
   15349 	return 0;
   15350 }
   15351 
   15352 static void
   15353 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15354 {
   15355 	uint32_t reg;
   15356 
   15357 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15358 		device_xname(sc->sc_dev), __func__));
   15359 
   15360 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15361 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15362 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15363 }
   15364 
   15365 /*
   15366  * Management mode and power management related subroutines.
   15367  * BMC, AMT, suspend/resume and EEE.
   15368  */
   15369 
   15370 #ifdef WM_WOL
   15371 static int
   15372 wm_check_mng_mode(struct wm_softc *sc)
   15373 {
   15374 	int rv;
   15375 
   15376 	switch (sc->sc_type) {
   15377 	case WM_T_ICH8:
   15378 	case WM_T_ICH9:
   15379 	case WM_T_ICH10:
   15380 	case WM_T_PCH:
   15381 	case WM_T_PCH2:
   15382 	case WM_T_PCH_LPT:
   15383 	case WM_T_PCH_SPT:
   15384 	case WM_T_PCH_CNP:
   15385 		rv = wm_check_mng_mode_ich8lan(sc);
   15386 		break;
   15387 	case WM_T_82574:
   15388 	case WM_T_82583:
   15389 		rv = wm_check_mng_mode_82574(sc);
   15390 		break;
   15391 	case WM_T_82571:
   15392 	case WM_T_82572:
   15393 	case WM_T_82573:
   15394 	case WM_T_80003:
   15395 		rv = wm_check_mng_mode_generic(sc);
   15396 		break;
   15397 	default:
		/* Nothing to do */
   15399 		rv = 0;
   15400 		break;
   15401 	}
   15402 
   15403 	return rv;
   15404 }
   15405 
   15406 static int
   15407 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15408 {
   15409 	uint32_t fwsm;
   15410 
   15411 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15412 
   15413 	if (((fwsm & FWSM_FW_VALID) != 0)
   15414 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15415 		return 1;
   15416 
   15417 	return 0;
   15418 }
   15419 
   15420 static int
   15421 wm_check_mng_mode_82574(struct wm_softc *sc)
   15422 {
   15423 	uint16_t data;
   15424 
   15425 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15426 
   15427 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15428 		return 1;
   15429 
   15430 	return 0;
   15431 }
   15432 
   15433 static int
   15434 wm_check_mng_mode_generic(struct wm_softc *sc)
   15435 {
   15436 	uint32_t fwsm;
   15437 
   15438 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15439 
   15440 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15441 		return 1;
   15442 
   15443 	return 0;
   15444 }
   15445 #endif /* WM_WOL */
   15446 
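/*
 * Check whether manageability pass-through is enabled, i.e. whether the
 * BMC receives traffic through this MAC.  This requires ASF firmware to
 * be present and TCO receive to be enabled, plus a per-family check of
 * FACTPS/FWSM or the NVM CFG2 manageability bits.  Returns 1 if enabled.
 */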
   15447 static int
   15448 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15449 {
   15450 	uint32_t manc, fwsm, factps;
   15451 
   15452 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15453 		return 0;
   15454 
   15455 	manc = CSR_READ(sc, WMREG_MANC);
   15456 
   15457 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15458 		device_xname(sc->sc_dev), manc));
   15459 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15460 		return 0;
   15461 
   15462 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15463 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15464 		factps = CSR_READ(sc, WMREG_FACTPS);
   15465 		if (((factps & FACTPS_MNGCG) == 0)
   15466 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15467 			return 1;
   15468 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   15469 		uint16_t data;
   15470 
   15471 		factps = CSR_READ(sc, WMREG_FACTPS);
   15472 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15473 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15474 			device_xname(sc->sc_dev), factps, data));
   15475 		if (((factps & FACTPS_MNGCG) == 0)
   15476 		    && ((data & NVM_CFG2_MNGM_MASK)
   15477 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15478 			return 1;
   15479 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15480 	    && ((manc & MANC_ASF_EN) == 0))
   15481 		return 1;
   15482 
   15483 	return 0;
   15484 }
   15485 
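/*
 * Check whether PHY reset is currently blocked by firmware: on ICH/PCH a
 * clear FWSM_RSPCIPHY bit means blocked (polled for a while, since the
 * firmware may release it); on 8257[1-4]/82583/80003 the
 * MANC_BLK_PHY_RST_ON_IDE bit means blocked.
 */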
   15486 static bool
   15487 wm_phy_resetisblocked(struct wm_softc *sc)
   15488 {
   15489 	bool blocked = false;
   15490 	uint32_t reg;
   15491 	int i = 0;
   15492 
   15493 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15494 		device_xname(sc->sc_dev), __func__));
   15495 
   15496 	switch (sc->sc_type) {
   15497 	case WM_T_ICH8:
   15498 	case WM_T_ICH9:
   15499 	case WM_T_ICH10:
   15500 	case WM_T_PCH:
   15501 	case WM_T_PCH2:
   15502 	case WM_T_PCH_LPT:
   15503 	case WM_T_PCH_SPT:
   15504 	case WM_T_PCH_CNP:
   15505 		do {
   15506 			reg = CSR_READ(sc, WMREG_FWSM);
   15507 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15508 				blocked = true;
   15509 				delay(10*1000);
   15510 				continue;
   15511 			}
   15512 			blocked = false;
   15513 		} while (blocked && (i++ < 30));
   15514 		return blocked;
   15516 	case WM_T_82571:
   15517 	case WM_T_82572:
   15518 	case WM_T_82573:
   15519 	case WM_T_82574:
   15520 	case WM_T_82583:
   15521 	case WM_T_80003:
   15522 		reg = CSR_READ(sc, WMREG_MANC);
   15523 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15524 			return true;
   15525 		else
   15526 			return false;
   15528 	default:
   15529 		/* No problem */
   15530 		break;
   15531 	}
   15532 
   15533 	return false;
   15534 }
   15535 
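/*
 * Set or clear the DRV_LOAD ("driver loaded") bit so that the firmware
 * (e.g. BMC/AMT) knows whether a host driver owns the hardware and can
 * hand control of shared resources back and forth accordingly.
 */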
   15536 static void
   15537 wm_get_hw_control(struct wm_softc *sc)
   15538 {
   15539 	uint32_t reg;
   15540 
   15541 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15542 		device_xname(sc->sc_dev), __func__));
   15543 
   15544 	if (sc->sc_type == WM_T_82573) {
   15545 		reg = CSR_READ(sc, WMREG_SWSM);
   15546 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15547 	} else if (sc->sc_type >= WM_T_82571) {
   15548 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15549 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15550 	}
   15551 }
   15552 
   15553 static void
   15554 wm_release_hw_control(struct wm_softc *sc)
   15555 {
   15556 	uint32_t reg;
   15557 
   15558 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15559 		device_xname(sc->sc_dev), __func__));
   15560 
   15561 	if (sc->sc_type == WM_T_82573) {
   15562 		reg = CSR_READ(sc, WMREG_SWSM);
   15563 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15564 	} else if (sc->sc_type >= WM_T_82571) {
   15565 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15566 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15567 	}
   15568 }
   15569 
   15570 static void
   15571 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15572 {
   15573 	uint32_t reg;
   15574 
   15575 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15576 		device_xname(sc->sc_dev), __func__));
   15577 
   15578 	if (sc->sc_type < WM_T_PCH2)
   15579 		return;
   15580 
   15581 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15582 
   15583 	if (gate)
   15584 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15585 	else
   15586 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15587 
   15588 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15589 }
   15590 
   15591 static int
   15592 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15593 {
   15594 	uint32_t fwsm, reg;
   15595 	int rv;
   15596 
   15597 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15598 		device_xname(sc->sc_dev), __func__));
   15599 
   15600 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15601 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15602 
   15603 	/* Disable ULP */
   15604 	wm_ulp_disable(sc);
   15605 
   15606 	/* Acquire PHY semaphore */
   15607 	rv = sc->phy.acquire(sc);
   15608 	if (rv != 0) {
   15609 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15610 		device_xname(sc->sc_dev), __func__));
   15611 		return rv;
   15612 	}
   15613 
   15614 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15615 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15616 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15617 	 */
   15618 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15619 	switch (sc->sc_type) {
   15620 	case WM_T_PCH_LPT:
   15621 	case WM_T_PCH_SPT:
   15622 	case WM_T_PCH_CNP:
   15623 		if (wm_phy_is_accessible_pchlan(sc))
   15624 			break;
   15625 
   15626 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15627 		 * forcing MAC to SMBus mode first.
   15628 		 */
   15629 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15630 		reg |= CTRL_EXT_FORCE_SMBUS;
   15631 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15632 #if 0
   15633 		/* XXX Isn't this required??? */
   15634 		CSR_WRITE_FLUSH(sc);
   15635 #endif
   15636 		/* Wait 50 milliseconds for MAC to finish any retries
   15637 		 * that it might be trying to perform from previous
   15638 		 * attempts to acknowledge any phy read requests.
   15639 		 */
   15640 		delay(50 * 1000);
   15641 		/* FALLTHROUGH */
   15642 	case WM_T_PCH2:
   15643 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15644 			break;
   15645 		/* FALLTHROUGH */
   15646 	case WM_T_PCH:
   15647 		if (sc->sc_type == WM_T_PCH)
   15648 			if ((fwsm & FWSM_FW_VALID) != 0)
   15649 				break;
   15650 
   15651 		if (wm_phy_resetisblocked(sc) == true) {
   15652 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   15653 			break;
   15654 		}
   15655 
   15656 		/* Toggle LANPHYPC Value bit */
   15657 		wm_toggle_lanphypc_pch_lpt(sc);
   15658 
   15659 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15660 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15661 				break;
   15662 
   15663 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15664 			 * so ensure that the MAC is also out of SMBus mode
   15665 			 */
   15666 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15667 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15668 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15669 
   15670 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15671 				break;
   15672 			rv = -1;
   15673 		}
   15674 		break;
   15675 	default:
   15676 		break;
   15677 	}
   15678 
   15679 	/* Release semaphore */
   15680 	sc->phy.release(sc);
   15681 
   15682 	if (rv == 0) {
   15683 		/* Check to see if able to reset PHY.  Print error if not */
   15684 		if (wm_phy_resetisblocked(sc)) {
   15685 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15686 			goto out;
   15687 		}
   15688 
   15689 		/* Reset the PHY before any access to it.  Doing so, ensures
   15690 		 * that the PHY is in a known good state before we read/write
   15691 		 * PHY registers.  The generic reset is sufficient here,
   15692 		 * because we haven't determined the PHY type yet.
   15693 		 */
   15694 		if (wm_reset_phy(sc) != 0)
   15695 			goto out;
   15696 
   15697 		/* On a successful reset, possibly need to wait for the PHY
   15698 		 * to quiesce to an accessible state before returning control
   15699 		 * to the calling function.  If the PHY does not quiesce, then
   15700 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
   15702 		 */
   15703 		if (wm_phy_resetisblocked(sc))
   15704 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15705 	}
   15706 
   15707 out:
   15708 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15709 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15710 		delay(10*1000);
   15711 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15712 	}
   15713 
	return rv;
   15715 }
   15716 
   15717 static void
   15718 wm_init_manageability(struct wm_softc *sc)
   15719 {
   15720 
   15721 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15722 		device_xname(sc->sc_dev), __func__));
   15723 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   15724 
   15725 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15726 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15727 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15728 
   15729 		/* Disable hardware interception of ARP */
   15730 		manc &= ~MANC_ARP_EN;
   15731 
   15732 		/* Enable receiving management packets to the host */
   15733 		if (sc->sc_type >= WM_T_82571) {
   15734 			manc |= MANC_EN_MNG2HOST;
   15735 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15736 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15737 		}
   15738 
   15739 		CSR_WRITE(sc, WMREG_MANC, manc);
   15740 	}
   15741 }
   15742 
   15743 static void
   15744 wm_release_manageability(struct wm_softc *sc)
   15745 {
   15746 
   15747 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15748 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15749 
   15750 		manc |= MANC_ARP_EN;
   15751 		if (sc->sc_type >= WM_T_82571)
   15752 			manc &= ~MANC_EN_MNG2HOST;
   15753 
   15754 		CSR_WRITE(sc, WMREG_MANC, manc);
   15755 	}
   15756 }
   15757 
   15758 static void
   15759 wm_get_wakeup(struct wm_softc *sc)
   15760 {
   15761 
   15762 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15763 	switch (sc->sc_type) {
   15764 	case WM_T_82573:
   15765 	case WM_T_82583:
   15766 		sc->sc_flags |= WM_F_HAS_AMT;
   15767 		/* FALLTHROUGH */
   15768 	case WM_T_80003:
   15769 	case WM_T_82575:
   15770 	case WM_T_82576:
   15771 	case WM_T_82580:
   15772 	case WM_T_I350:
   15773 	case WM_T_I354:
   15774 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15775 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15776 		/* FALLTHROUGH */
   15777 	case WM_T_82541:
   15778 	case WM_T_82541_2:
   15779 	case WM_T_82547:
   15780 	case WM_T_82547_2:
   15781 	case WM_T_82571:
   15782 	case WM_T_82572:
   15783 	case WM_T_82574:
   15784 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15785 		break;
   15786 	case WM_T_ICH8:
   15787 	case WM_T_ICH9:
   15788 	case WM_T_ICH10:
   15789 	case WM_T_PCH:
   15790 	case WM_T_PCH2:
   15791 	case WM_T_PCH_LPT:
   15792 	case WM_T_PCH_SPT:
   15793 	case WM_T_PCH_CNP:
   15794 		sc->sc_flags |= WM_F_HAS_AMT;
   15795 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15796 		break;
   15797 	default:
   15798 		break;
   15799 	}
   15800 
   15801 	/* 1: HAS_MANAGE */
   15802 	if (wm_enable_mng_pass_thru(sc) != 0)
   15803 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15804 
   15805 	/*
	 * Note that the WOL flags are set after the resetting of the
	 * EEPROM stuff.
   15808 	 */
   15809 }
   15810 
   15811 /*
   15812  * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT and newer, excluding some early I217/I218 devices
 * (see the checks below).
   15814  */
   15815 static int
   15816 wm_ulp_disable(struct wm_softc *sc)
   15817 {
   15818 	uint32_t reg;
   15819 	uint16_t phyreg;
   15820 	int i = 0, rv;
   15821 
   15822 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15823 		device_xname(sc->sc_dev), __func__));
   15824 	/* Exclude old devices */
   15825 	if ((sc->sc_type < WM_T_PCH_LPT)
   15826 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15827 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15828 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15829 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15830 		return 0;
   15831 
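	/*
	 * If ME firmware is running, ask it to deconfigure ULP via H2ME
	 * and poll FWSM until it clears ULP_CFG_DONE; otherwise clear ULP
	 * by hand through the PHY registers further below.
	 */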
   15832 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15833 		/* Request ME un-configure ULP mode in the PHY */
   15834 		reg = CSR_READ(sc, WMREG_H2ME);
   15835 		reg &= ~H2ME_ULP;
   15836 		reg |= H2ME_ENFORCE_SETTINGS;
   15837 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15838 
   15839 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15840 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15841 			if (i++ == 30) {
   15842 				device_printf(sc->sc_dev, "%s timed out\n",
   15843 				    __func__);
   15844 				return -1;
   15845 			}
   15846 			delay(10 * 1000);
   15847 		}
   15848 		reg = CSR_READ(sc, WMREG_H2ME);
   15849 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15850 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15851 
   15852 		return 0;
   15853 	}
   15854 
   15855 	/* Acquire semaphore */
   15856 	rv = sc->phy.acquire(sc);
   15857 	if (rv != 0) {
   15858 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15859 		device_xname(sc->sc_dev), __func__));
   15860 		return rv;
   15861 	}
   15862 
   15863 	/* Toggle LANPHYPC */
   15864 	wm_toggle_lanphypc_pch_lpt(sc);
   15865 
   15866 	/* Unforce SMBus mode in PHY */
   15867 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15868 	if (rv != 0) {
   15869 		uint32_t reg2;
   15870 
   15871 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15872 			__func__);
   15873 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15874 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15875 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15876 		delay(50 * 1000);
   15877 
   15878 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15879 		    &phyreg);
   15880 		if (rv != 0)
   15881 			goto release;
   15882 	}
   15883 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15884 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15885 
   15886 	/* Unforce SMBus mode in MAC */
   15887 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15888 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15889 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15890 
   15891 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15892 	if (rv != 0)
   15893 		goto release;
   15894 	phyreg |= HV_PM_CTRL_K1_ENA;
   15895 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15896 
   15897 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15898 		&phyreg);
   15899 	if (rv != 0)
   15900 		goto release;
   15901 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15902 	    | I218_ULP_CONFIG1_STICKY_ULP
   15903 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15904 	    | I218_ULP_CONFIG1_WOL_HOST
   15905 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15906 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15907 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15908 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15909 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15910 	phyreg |= I218_ULP_CONFIG1_START;
   15911 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15912 
   15913 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15914 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15915 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15916 
   15917 release:
   15918 	/* Release semaphore */
   15919 	sc->phy.release(sc);
   15920 	wm_gmii_reset(sc);
   15921 	delay(50 * 1000);
   15922 
   15923 	return rv;
   15924 }
   15925 
   15926 /* WOL in the newer chipset interfaces (pchlan) */
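/*
 * On PCH and newer, the PHY (not the MAC) detects wake events, so the
 * MAC's receive address registers, multicast table and the relevant
 * RCTL/CTRL bits are mirrored into the BM PHY wakeup register page
 * before PHY wakeup is enabled.
 */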
   15927 static int
   15928 wm_enable_phy_wakeup(struct wm_softc *sc)
   15929 {
   15930 	device_t dev = sc->sc_dev;
   15931 	uint32_t mreg, moff;
   15932 	uint16_t wuce, wuc, wufc, preg;
   15933 	int i, rv;
   15934 
   15935 	KASSERT(sc->sc_type >= WM_T_PCH);
   15936 
   15937 	/* Copy MAC RARs to PHY RARs */
   15938 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15939 
   15940 	/* Activate PHY wakeup */
   15941 	rv = sc->phy.acquire(sc);
   15942 	if (rv != 0) {
   15943 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15944 		    __func__);
   15945 		return rv;
   15946 	}
   15947 
   15948 	/*
   15949 	 * Enable access to PHY wakeup registers.
   15950 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15951 	 */
   15952 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15953 	if (rv != 0) {
   15954 		device_printf(dev,
   15955 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15956 		goto release;
   15957 	}
   15958 
   15959 	/* Copy MAC MTA to PHY MTA */
   15960 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15961 		uint16_t lo, hi;
   15962 
   15963 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15964 		lo = (uint16_t)(mreg & 0xffff);
   15965 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15966 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15967 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15968 	}
   15969 
   15970 	/* Configure PHY Rx Control register */
   15971 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15972 	mreg = CSR_READ(sc, WMREG_RCTL);
   15973 	if (mreg & RCTL_UPE)
   15974 		preg |= BM_RCTL_UPE;
   15975 	if (mreg & RCTL_MPE)
   15976 		preg |= BM_RCTL_MPE;
   15977 	preg &= ~(BM_RCTL_MO_MASK);
   15978 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15979 	if (moff != 0)
   15980 		preg |= moff << BM_RCTL_MO_SHIFT;
   15981 	if (mreg & RCTL_BAM)
   15982 		preg |= BM_RCTL_BAM;
   15983 	if (mreg & RCTL_PMCF)
   15984 		preg |= BM_RCTL_PMCF;
   15985 	mreg = CSR_READ(sc, WMREG_CTRL);
   15986 	if (mreg & CTRL_RFCE)
   15987 		preg |= BM_RCTL_RFCE;
   15988 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15989 
   15990 	wuc = WUC_APME | WUC_PME_EN;
   15991 	wufc = WUFC_MAG;
   15992 	/* Enable PHY wakeup in MAC register */
   15993 	CSR_WRITE(sc, WMREG_WUC,
   15994 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15995 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15996 
   15997 	/* Configure and enable PHY wakeup in PHY registers */
   15998 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15999 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   16000 
   16001 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   16002 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16003 
   16004 release:
   16005 	sc->phy.release(sc);
   16006 
	return rv;
   16008 }
   16009 
   16010 /* Power down workaround on D3 */
   16011 static void
   16012 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16013 {
   16014 	uint32_t reg;
   16015 	uint16_t phyreg;
   16016 	int i;
   16017 
   16018 	for (i = 0; i < 2; i++) {
   16019 		/* Disable link */
   16020 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16021 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16022 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16023 
   16024 		/*
   16025 		 * Call gig speed drop workaround on Gig disable before
   16026 		 * accessing any PHY registers
   16027 		 */
   16028 		if (sc->sc_type == WM_T_ICH8)
   16029 			wm_gig_downshift_workaround_ich8lan(sc);
   16030 
   16031 		/* Write VR power-down enable */
   16032 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16033 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16034 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16035 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16036 
   16037 		/* Read it back and test */
   16038 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16039 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16040 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16041 			break;
   16042 
   16043 		/* Issue PHY reset and repeat at most one more time */
   16044 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16045 	}
   16046 }
   16047 
   16048 /*
   16049  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16050  *  @sc: pointer to the HW structure
   16051  *
   16052  *  During S0 to Sx transition, it is possible the link remains at gig
   16053  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16054  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16055  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16056  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16057  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
   16059  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16060  *  than 10Mbps w/o EEE.
   16061  */
   16062 static void
   16063 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16064 {
   16065 	device_t dev = sc->sc_dev;
   16066 	struct ethercom *ec = &sc->sc_ethercom;
   16067 	uint32_t phy_ctrl;
   16068 	int rv;
   16069 
   16070 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16071 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16072 
   16073 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16074 
   16075 	if (sc->sc_phytype == WMPHY_I217) {
   16076 		uint16_t devid = sc->sc_pcidevid;
   16077 
   16078 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16079 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16080 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16081 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16082 		    (sc->sc_type >= WM_T_PCH_SPT))
   16083 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16084 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16085 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16086 
   16087 		if (sc->phy.acquire(sc) != 0)
   16088 			goto out;
   16089 
   16090 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16091 			uint16_t eee_advert;
   16092 
   16093 			rv = wm_read_emi_reg_locked(dev,
   16094 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16095 			if (rv)
   16096 				goto release;
   16097 
   16098 			/*
   16099 			 * Disable LPLU if both link partners support 100BaseT
   16100 			 * EEE and 100Full is advertised on both ends of the
   16101 			 * link, and enable Auto Enable LPI since there will
   16102 			 * be no driver to enable LPI while in Sx.
   16103 			 */
   16104 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16105 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16106 				uint16_t anar, phy_reg;
   16107 
   16108 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16109 				    &anar);
   16110 				if (anar & ANAR_TX_FD) {
   16111 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16112 					    PHY_CTRL_NOND0A_LPLU);
   16113 
   16114 					/* Set Auto Enable LPI after link up */
   16115 					sc->phy.readreg_locked(dev, 2,
   16116 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16117 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16118 					sc->phy.writereg_locked(dev, 2,
   16119 					    I217_LPI_GPIO_CTRL, phy_reg);
   16120 				}
   16121 			}
   16122 		}
   16123 
   16124 		/*
   16125 		 * For i217 Intel Rapid Start Technology support,
   16126 		 * when the system is going into Sx and no manageability engine
   16127 		 * is present, the driver must configure proxy to reset only on
   16128 		 * power good.	LPI (Low Power Idle) state must also reset only
   16129 		 * on power good, as well as the MTA (Multicast table array).
   16130 		 * The SMBus release must also be disabled on LCD reset.
   16131 		 */
   16132 
   16133 		/*
   16134 		 * Enable MTA to reset for Intel Rapid Start Technology
   16135 		 * Support
   16136 		 */
   16137 
   16138 release:
   16139 		sc->phy.release(sc);
   16140 	}
   16141 out:
   16142 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16143 
   16144 	if (sc->sc_type == WM_T_ICH8)
   16145 		wm_gig_downshift_workaround_ich8lan(sc);
   16146 
   16147 	if (sc->sc_type >= WM_T_PCH) {
   16148 		wm_oem_bits_config_ich8lan(sc, false);
   16149 
   16150 		/* Reset PHY to activate OEM bits on 82577/8 */
   16151 		if (sc->sc_type == WM_T_PCH)
   16152 			wm_reset_phy(sc);
   16153 
   16154 		if (sc->phy.acquire(sc) != 0)
   16155 			return;
   16156 		wm_write_smbus_addr(sc);
   16157 		sc->phy.release(sc);
   16158 	}
   16159 }
   16160 
   16161 /*
   16162  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16163  *  @sc: pointer to the HW structure
   16164  *
   16165  *  During Sx to S0 transitions on non-managed devices or managed devices
   16166  *  on which PHY resets are not blocked, if the PHY registers cannot be
   16167  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   16168  *  the PHY.
   16169  *  On i217, setup Intel Rapid Start Technology.
   16170  */
   16171 static int
   16172 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16173 {
   16174 	device_t dev = sc->sc_dev;
   16175 	int rv;
   16176 
   16177 	if (sc->sc_type < WM_T_PCH2)
   16178 		return 0;
   16179 
   16180 	rv = wm_init_phy_workarounds_pchlan(sc);
   16181 	if (rv != 0)
   16182 		return rv;
   16183 
	/*
	 * For i217 Intel Rapid Start Technology support: when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
   16189 	if (sc->sc_phytype == WMPHY_I217) {
   16190 		uint16_t phy_reg;
   16191 
   16192 		rv = sc->phy.acquire(sc);
   16193 		if (rv != 0)
   16194 			return rv;
   16195 
   16196 		/* Clear Auto Enable LPI after link up */
   16197 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16198 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16199 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16200 
   16201 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16202 			/* Restore clear on SMB if no manageability engine
   16203 			 * is present
   16204 			 */
   16205 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16206 			    &phy_reg);
   16207 			if (rv != 0)
   16208 				goto release;
   16209 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16210 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16211 
   16212 			/* Disable Proxy */
   16213 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16214 		}
   16215 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16217 		if (rv != 0)
   16218 			goto release;
   16219 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16220 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16221 
   16222 release:
   16223 		sc->phy.release(sc);
   16224 		return rv;
   16225 	}
   16226 
   16227 	return 0;
   16228 }
   16229 
   16230 static void
   16231 wm_enable_wakeup(struct wm_softc *sc)
   16232 {
   16233 	uint32_t reg, pmreg;
   16234 	pcireg_t pmode;
   16235 	int rv = 0;
   16236 
   16237 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16238 		device_xname(sc->sc_dev), __func__));
   16239 
   16240 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16241 	    &pmreg, NULL) == 0)
   16242 		return;
   16243 
   16244 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16245 		goto pme;
   16246 
   16247 	/* Advertise the wakeup capability */
   16248 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16249 	    | CTRL_SWDPIN(3));
   16250 
   16251 	/* Keep the laser running on fiber adapters */
   16252 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16253 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16254 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16255 		reg |= CTRL_EXT_SWDPIN(3);
   16256 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16257 	}
   16258 
   16259 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16260 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16261 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16262 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16263 		wm_suspend_workarounds_ich8lan(sc);
   16264 
   16265 #if 0	/* For the multicast packet */
   16266 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16267 	reg |= WUFC_MC;
   16268 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16269 #endif
   16270 
   16271 	if (sc->sc_type >= WM_T_PCH) {
   16272 		rv = wm_enable_phy_wakeup(sc);
   16273 		if (rv != 0)
   16274 			goto pme;
   16275 	} else {
   16276 		/* Enable wakeup by the MAC */
   16277 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16278 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16279 	}
   16280 
   16281 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16282 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16283 		|| (sc->sc_type == WM_T_PCH2))
   16284 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16285 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16286 
   16287 pme:
   16288 	/* Request PME */
   16289 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16290 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16291 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16292 		/* For WOL */
   16293 		pmode |= PCI_PMCSR_PME_EN;
   16294 	} else {
   16295 		/* Disable WOL */
   16296 		pmode &= ~PCI_PMCSR_PME_EN;
   16297 	}
   16298 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16299 }
   16300 
   16301 /* Disable ASPM L0s and/or L1 for workaround */
   16302 static void
   16303 wm_disable_aspm(struct wm_softc *sc)
   16304 {
   16305 	pcireg_t reg, mask = 0;
	const char *str = "";
   16307 
   16308 	/*
	 * Only for PCIe devices which have the PCIe capability in their
	 * PCI config space.
   16311 	 */
   16312 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16313 		return;
   16314 
   16315 	switch (sc->sc_type) {
   16316 	case WM_T_82571:
   16317 	case WM_T_82572:
   16318 		/*
   16319 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16320 		 * State Power management L1 State (ASPM L1).
   16321 		 */
   16322 		mask = PCIE_LCSR_ASPM_L1;
   16323 		str = "L1 is";
   16324 		break;
   16325 	case WM_T_82573:
   16326 	case WM_T_82574:
   16327 	case WM_T_82583:
   16328 		/*
   16329 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16330 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The 82574 and 82583 documents say that
		 * disabling L0s on those specific chipsets is sufficient,
		 * but we follow what the Intel em driver does.
   16335 		 *
   16336 		 * References:
   16337 		 * Errata 8 of the Specification Update of i82573.
   16338 		 * Errata 20 of the Specification Update of i82574.
   16339 		 * Errata 9 of the Specification Update of i82583.
   16340 		 */
   16341 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16342 		str = "L0s and L1 are";
   16343 		break;
   16344 	default:
   16345 		return;
   16346 	}
   16347 
   16348 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16349 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16350 	reg &= ~mask;
   16351 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16352 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16353 
   16354 	/* Print only in wm_attach() */
   16355 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16356 		aprint_verbose_dev(sc->sc_dev,
   16357 		    "ASPM %s disabled to workaround the errata.\n", str);
   16358 }
   16359 
   16360 /* LPLU */
   16361 
   16362 static void
   16363 wm_lplu_d0_disable(struct wm_softc *sc)
   16364 {
   16365 	struct mii_data *mii = &sc->sc_mii;
   16366 	uint32_t reg;
   16367 	uint16_t phyval;
   16368 
   16369 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16370 		device_xname(sc->sc_dev), __func__));
   16371 
   16372 	if (sc->sc_phytype == WMPHY_IFE)
   16373 		return;
   16374 
   16375 	switch (sc->sc_type) {
   16376 	case WM_T_82571:
   16377 	case WM_T_82572:
   16378 	case WM_T_82573:
   16379 	case WM_T_82575:
   16380 	case WM_T_82576:
   16381 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16382 		phyval &= ~PMR_D0_LPLU;
   16383 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16384 		break;
   16385 	case WM_T_82580:
   16386 	case WM_T_I350:
   16387 	case WM_T_I210:
   16388 	case WM_T_I211:
   16389 		reg = CSR_READ(sc, WMREG_PHPM);
   16390 		reg &= ~PHPM_D0A_LPLU;
   16391 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16392 		break;
   16393 	case WM_T_82574:
   16394 	case WM_T_82583:
   16395 	case WM_T_ICH8:
   16396 	case WM_T_ICH9:
   16397 	case WM_T_ICH10:
   16398 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16399 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16400 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16401 		CSR_WRITE_FLUSH(sc);
   16402 		break;
   16403 	case WM_T_PCH:
   16404 	case WM_T_PCH2:
   16405 	case WM_T_PCH_LPT:
   16406 	case WM_T_PCH_SPT:
   16407 	case WM_T_PCH_CNP:
   16408 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16409 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16410 		if (wm_phy_resetisblocked(sc) == false)
   16411 			phyval |= HV_OEM_BITS_ANEGNOW;
   16412 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16413 		break;
   16414 	default:
   16415 		break;
   16416 	}
   16417 }
   16418 
   16419 /* EEE */
   16420 
   16421 static int
   16422 wm_set_eee_i350(struct wm_softc *sc)
   16423 {
   16424 	struct ethercom *ec = &sc->sc_ethercom;
   16425 	uint32_t ipcnfg, eeer;
   16426 	uint32_t ipcnfg_mask
   16427 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16428 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16429 
   16430 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16431 
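	/*
	 * IPCNFG carries the EEE autonegotiation (and 10BASE-Te) enables;
	 * EEER enables Tx/Rx LPI and LPI flow control (inferred from the
	 * register macro names).
	 */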
   16432 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16433 	eeer = CSR_READ(sc, WMREG_EEER);
   16434 
   16435 	/* Enable or disable per user setting */
   16436 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16437 		ipcnfg |= ipcnfg_mask;
   16438 		eeer |= eeer_mask;
   16439 	} else {
   16440 		ipcnfg &= ~ipcnfg_mask;
   16441 		eeer &= ~eeer_mask;
   16442 	}
   16443 
   16444 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16445 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16446 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16447 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16448 
   16449 	return 0;
   16450 }
   16451 
   16452 static int
   16453 wm_set_eee_pchlan(struct wm_softc *sc)
   16454 {
   16455 	device_t dev = sc->sc_dev;
   16456 	struct ethercom *ec = &sc->sc_ethercom;
   16457 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16458 	int rv;
   16459 
   16460 	switch (sc->sc_phytype) {
   16461 	case WMPHY_82579:
   16462 		lpa = I82579_EEE_LP_ABILITY;
   16463 		pcs_status = I82579_EEE_PCS_STATUS;
   16464 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16465 		break;
   16466 	case WMPHY_I217:
   16467 		lpa = I217_EEE_LP_ABILITY;
   16468 		pcs_status = I217_EEE_PCS_STATUS;
   16469 		adv_addr = I217_EEE_ADVERTISEMENT;
   16470 		break;
   16471 	default:
   16472 		return 0;
   16473 	}
   16474 
   16475 	rv = sc->phy.acquire(sc);
   16476 	if (rv != 0) {
   16477 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16478 		return rv;
   16479 	}
   16480 
   16481 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16482 	if (rv != 0)
   16483 		goto release;
   16484 
   16485 	/* Clear bits that enable EEE in various speeds */
   16486 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16487 
   16488 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16489 		/* Save off link partner's EEE ability */
   16490 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16491 		if (rv != 0)
   16492 			goto release;
   16493 
   16494 		/* Read EEE advertisement */
   16495 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16496 			goto release;
   16497 
   16498 		/*
   16499 		 * Enable EEE only for speeds in which the link partner is
   16500 		 * EEE capable and for which we advertise EEE.
   16501 		 */
   16502 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16503 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16504 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16505 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16506 			if ((data & ANLPAR_TX_FD) != 0)
   16507 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16508 			else {
   16509 				/*
   16510 				 * EEE is not supported in 100Half, so ignore
   16511 				 * partner's EEE in 100 ability if full-duplex
   16512 				 * is not advertised.
   16513 				 */
   16514 				sc->eee_lp_ability
   16515 				    &= ~AN_EEEADVERT_100_TX;
   16516 			}
   16517 		}
   16518 	}
   16519 
   16520 	if (sc->sc_phytype == WMPHY_82579) {
   16521 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16522 		if (rv != 0)
   16523 			goto release;
   16524 
   16525 		data &= ~I82579_LPI_PLL_SHUT_100;
   16526 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16527 	}
   16528 
   16529 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16530 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16531 		goto release;
   16532 
   16533 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16534 release:
   16535 	sc->phy.release(sc);
   16536 
   16537 	return rv;
   16538 }
   16539 
   16540 static int
   16541 wm_set_eee(struct wm_softc *sc)
   16542 {
   16543 	struct ethercom *ec = &sc->sc_ethercom;
   16544 
   16545 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16546 		return 0;
   16547 
   16548 	if (sc->sc_type == WM_T_I354) {
   16549 		/* I354 uses an external PHY */
   16550 		return 0; /* not yet */
   16551 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16552 		return wm_set_eee_i350(sc);
   16553 	else if (sc->sc_type >= WM_T_PCH2)
   16554 		return wm_set_eee_pchlan(sc);
   16555 
   16556 	return 0;
   16557 }
   16558 
   16559 /*
   16560  * Workarounds (mainly PHY related).
   16561  * Basically, PHY's workarounds are in the PHY drivers.
   16562  */
   16563 
   16564 /* Workaround for 82566 Kumeran PCS lock loss */
   16565 static int
   16566 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16567 {
   16568 	struct mii_data *mii = &sc->sc_mii;
   16569 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16570 	int i, reg, rv;
   16571 	uint16_t phyreg;
   16572 
   16573 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16574 		device_xname(sc->sc_dev), __func__));
   16575 
   16576 	/* If the link is not up, do nothing */
   16577 	if ((status & STATUS_LU) == 0)
   16578 		return 0;
   16579 
	/* Nothing to do if the link speed is not 1Gbps */
   16581 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16582 		return 0;
   16583 
   16584 	for (i = 0; i < 10; i++) {
		/*
		 * Read the diagnostic register twice (other e1000-derived
		 * drivers do the same; the first read presumably returns
		 * the latched state).
		 */
   16586 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16587 		if (rv != 0)
   16588 			return rv;
   16589 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16590 		if (rv != 0)
   16591 			return rv;
   16592 
   16593 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16594 			goto out;	/* GOOD! */
   16595 
   16596 		/* Reset the PHY */
   16597 		wm_reset_phy(sc);
   16598 		delay(5*1000);
   16599 	}
   16600 
   16601 	/* Disable GigE link negotiation */
   16602 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16603 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16604 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16605 
   16606 	/*
   16607 	 * Call gig speed drop workaround on Gig disable before accessing
   16608 	 * any PHY registers.
   16609 	 */
   16610 	wm_gig_downshift_workaround_ich8lan(sc);
   16611 
   16612 out:
   16613 	return 0;
   16614 }
   16615 
   16616 /*
   16617  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16618  *  @sc: pointer to the HW structure
   16619  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16621  *  LPLU, Gig disable, MDIC PHY reset):
   16622  *    1) Set Kumeran Near-end loopback
   16623  *    2) Clear Kumeran Near-end loopback
   16624  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16625  */
   16626 static void
   16627 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16628 {
   16629 	uint16_t kmreg;
   16630 
   16631 	/* Only for igp3 */
   16632 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16633 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16634 			return;
   16635 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16636 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16637 			return;
   16638 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16639 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16640 	}
   16641 }
   16642 
   16643 /*
   16644  * Workaround for pch's PHYs
   16645  * XXX should be moved to new PHY driver?
   16646  */
   16647 static int
   16648 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16649 {
   16650 	device_t dev = sc->sc_dev;
   16651 	struct mii_data *mii = &sc->sc_mii;
   16652 	struct mii_softc *child;
   16653 	uint16_t phy_data, phyrev = 0;
   16654 	int phytype = sc->sc_phytype;
   16655 	int rv;
   16656 
   16657 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16658 		device_xname(dev), __func__));
   16659 	KASSERT(sc->sc_type == WM_T_PCH);
   16660 
   16661 	/* Set MDIO slow mode before any other MDIO access */
   16662 	if (phytype == WMPHY_82577)
   16663 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16664 			return rv;
   16665 
   16666 	child = LIST_FIRST(&mii->mii_phys);
   16667 	if (child != NULL)
   16668 		phyrev = child->mii_mpd_rev;
   16669 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16671 	if ((child != NULL) &&
   16672 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16673 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16674 		/* Disable generation of early preamble (0x4431) */
   16675 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16676 		    &phy_data);
   16677 		if (rv != 0)
   16678 			return rv;
   16679 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16680 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16681 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16682 		    phy_data);
   16683 		if (rv != 0)
   16684 			return rv;
   16685 
   16686 		/* Preamble tuning for SSC */
   16687 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16688 		if (rv != 0)
   16689 			return rv;
   16690 	}
   16691 
   16692 	/* 82578 */
   16693 	if (phytype == WMPHY_82578) {
   16694 		/*
   16695 		 * Return registers to default by doing a soft reset then
   16696 		 * writing 0x3140 to the control register
   16697 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16698 		 */
   16699 		if ((child != NULL) && (phyrev < 2)) {
   16700 			PHY_RESET(child);
   16701 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16702 			if (rv != 0)
   16703 				return rv;
   16704 		}
   16705 	}
   16706 
   16707 	/* Select page 0 */
   16708 	if ((rv = sc->phy.acquire(sc)) != 0)
   16709 		return rv;
   16710 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16711 	sc->phy.release(sc);
   16712 	if (rv != 0)
   16713 		return rv;
   16714 
   16715 	/*
   16716 	 * Configure the K1 Si workaround during phy reset assuming there is
   16717 	 * link so that it disables K1 if link is in 1Gbps.
   16718 	 */
   16719 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16720 		return rv;
   16721 
   16722 	/* Workaround for link disconnects on a busy hub in half duplex */
   16723 	rv = sc->phy.acquire(sc);
   16724 	if (rv)
   16725 		return rv;
   16726 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16727 	if (rv)
   16728 		goto release;
   16729 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16730 	    phy_data & 0x00ff);
   16731 	if (rv)
   16732 		goto release;
   16733 
   16734 	/* Set MSE higher to enable link to stay up when noise is high */
   16735 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16736 release:
   16737 	sc->phy.release(sc);
   16738 
   16739 	return rv;
   16740 }
   16741 
   16742 /*
   16743  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16744  *  @sc:   pointer to the HW structure
   16745  */
   16746 static void
   16747 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16748 {
   16749 
   16750 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16751 		device_xname(sc->sc_dev), __func__));
   16752 
   16753 	if (sc->phy.acquire(sc) != 0)
   16754 		return;
   16755 
   16756 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16757 
   16758 	sc->phy.release(sc);
   16759 }
   16760 
   16761 static void
   16762 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16763 {
   16764 	device_t dev = sc->sc_dev;
   16765 	uint32_t mac_reg;
   16766 	uint16_t i, wuce;
   16767 	int count;
   16768 
   16769 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16770 		device_xname(dev), __func__));
   16771 
   16772 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16773 		return;
   16774 
   16775 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16776 	count = wm_rar_count(sc);
   16777 	for (i = 0; i < count; i++) {
   16778 		uint16_t lo, hi;
   16779 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16780 		lo = (uint16_t)(mac_reg & 0xffff);
   16781 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16782 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16783 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16784 
   16785 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16786 		lo = (uint16_t)(mac_reg & 0xffff);
   16787 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16788 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16789 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16790 	}
   16791 
   16792 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16793 }
   16794 
   16795 /*
   16796  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16797  *  with 82579 PHY
   16798  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16799  */
   16800 static int
   16801 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16802 {
   16803 	device_t dev = sc->sc_dev;
   16804 	int rar_count;
   16805 	int rv;
   16806 	uint32_t mac_reg;
   16807 	uint16_t dft_ctrl, data;
   16808 	uint16_t i;
   16809 
   16810 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16811 		device_xname(dev), __func__));
   16812 
   16813 	if (sc->sc_type < WM_T_PCH2)
   16814 		return 0;
   16815 
   16816 	/* Acquire PHY semaphore */
   16817 	rv = sc->phy.acquire(sc);
   16818 	if (rv != 0)
   16819 		return rv;
   16820 
   16821 	/* Disable Rx path while enabling/disabling workaround */
   16822 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16823 	if (rv != 0)
   16824 		goto out;
   16825 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16826 	    dft_ctrl | (1 << 14));
   16827 	if (rv != 0)
   16828 		goto out;
   16829 
   16830 	if (enable) {
		/*
		 * Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC.
		 */
   16834 		rar_count = wm_rar_count(sc);
   16835 		for (i = 0; i < rar_count; i++) {
   16836 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16837 			uint32_t addr_high, addr_low;
   16838 
   16839 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16840 			if (!(addr_high & RAL_AV))
   16841 				continue;
   16842 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16843 			mac_addr[0] = (addr_low & 0xFF);
   16844 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16845 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16846 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16847 			mac_addr[4] = (addr_high & 0xFF);
   16848 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16849 
   16850 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16851 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16852 		}
   16853 
   16854 		/* Write Rx addresses to the PHY */
   16855 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16856 	}
   16857 
   16858 	/*
   16859 	 * If enable ==
   16860 	 *	true: Enable jumbo frame workaround in the MAC.
   16861 	 *	false: Write MAC register values back to h/w defaults.
   16862 	 */
   16863 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16864 	if (enable) {
   16865 		mac_reg &= ~(1 << 14);
   16866 		mac_reg |= (7 << 15);
   16867 	} else
   16868 		mac_reg &= ~(0xf << 14);
   16869 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16870 
   16871 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16872 	if (enable) {
   16873 		mac_reg |= RCTL_SECRC;
   16874 		sc->sc_rctl |= RCTL_SECRC;
   16875 		sc->sc_flags |= WM_F_CRC_STRIP;
   16876 	} else {
   16877 		mac_reg &= ~RCTL_SECRC;
   16878 		sc->sc_rctl &= ~RCTL_SECRC;
   16879 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16880 	}
   16881 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16882 
   16883 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16884 	if (rv != 0)
   16885 		goto out;
   16886 	if (enable)
   16887 		data |= 1 << 0;
   16888 	else
   16889 		data &= ~(1 << 0);
   16890 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16891 	if (rv != 0)
   16892 		goto out;
   16893 
   16894 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16895 	if (rv != 0)
   16896 		goto out;
   16897 	/*
	 * XXX FreeBSD and Linux do the same thing: they set the same value
	 * in both the enable case and the disable case. Is that correct?
   16900 	 */
   16901 	data &= ~(0xf << 8);
   16902 	data |= (0xb << 8);
   16903 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16904 	if (rv != 0)
   16905 		goto out;
   16906 
   16907 	/*
   16908 	 * If enable ==
   16909 	 *	true: Enable jumbo frame workaround in the PHY.
   16910 	 *	false: Write PHY register values back to h/w defaults.
   16911 	 */
   16912 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16913 	if (rv != 0)
   16914 		goto out;
   16915 	data &= ~(0x7F << 5);
   16916 	if (enable)
   16917 		data |= (0x37 << 5);
   16918 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16919 	if (rv != 0)
   16920 		goto out;
   16921 
   16922 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16923 	if (rv != 0)
   16924 		goto out;
   16925 	if (enable)
   16926 		data &= ~(1 << 13);
   16927 	else
   16928 		data |= (1 << 13);
   16929 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16930 	if (rv != 0)
   16931 		goto out;
   16932 
   16933 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16934 	if (rv != 0)
   16935 		goto out;
   16936 	data &= ~(0x3FF << 2);
   16937 	if (enable)
   16938 		data |= (I82579_TX_PTR_GAP << 2);
   16939 	else
   16940 		data |= (0x8 << 2);
   16941 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16942 	if (rv != 0)
   16943 		goto out;
   16944 
   16945 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16946 	    enable ? 0xf100 : 0x7e00);
   16947 	if (rv != 0)
   16948 		goto out;
   16949 
   16950 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16951 	if (rv != 0)
   16952 		goto out;
   16953 	if (enable)
   16954 		data |= 1 << 10;
   16955 	else
   16956 		data &= ~(1 << 10);
   16957 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16958 	if (rv != 0)
   16959 		goto out;
   16960 
   16961 	/* Re-enable Rx path after enabling/disabling workaround */
   16962 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16963 	    dft_ctrl & ~(1 << 14));
   16964 
   16965 out:
   16966 	sc->phy.release(sc);
   16967 
   16968 	return rv;
   16969 }
   16970 
   16971 /*
   16972  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16973  *  done after every PHY reset.
   16974  */
   16975 static int
   16976 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16977 {
   16978 	device_t dev = sc->sc_dev;
   16979 	int rv;
   16980 
   16981 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16982 		device_xname(dev), __func__));
   16983 	KASSERT(sc->sc_type == WM_T_PCH2);
   16984 
   16985 	/* Set MDIO slow mode before any other MDIO access */
   16986 	rv = wm_set_mdio_slow_mode_hv(sc);
   16987 	if (rv != 0)
   16988 		return rv;
   16989 
   16990 	rv = sc->phy.acquire(sc);
   16991 	if (rv != 0)
   16992 		return rv;
   16993 	/* Set MSE higher to enable link to stay up when noise is high */
   16994 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16995 	if (rv != 0)
   16996 		goto release;
   16997 	/* Drop link after 5 times MSE threshold was reached */
   16998 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16999 release:
   17000 	sc->phy.release(sc);
   17001 
   17002 	return rv;
   17003 }
   17004 
   17005 /**
   17006  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   17007  *  @link: link up bool flag
   17008  *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
 *  indications, preventing further DMA write requests.  Work around the
 *  issue by disabling the de-assertion of the clock request when in
 *  1Gbps mode.
   17012  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   17013  *  speeds in order to avoid Tx hangs.
   17014  **/
   17015 static int
   17016 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17017 {
   17018 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17019 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17020 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17021 	uint16_t phyreg;
   17022 
   17023 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc,
		    KUMCTRLSTA_OFFSET_K1_CONFIG, &phyreg);
   17027 		if (rv != 0)
   17028 			goto release;
   17029 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17030 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17031 		if (rv != 0)
   17032 			goto release;
   17033 		delay(20);
   17034 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17035 
   17036 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17037 		    &phyreg);
   17038 release:
   17039 		sc->phy.release(sc);
   17040 		return rv;
   17041 	}
   17042 
   17043 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17044 
   17045 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17046 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17047 	    || !link
   17048 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17049 		goto update_fextnvm6;
   17050 
   17051 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17052 
   17053 	/* Clear link status transmit timeout */
   17054 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17055 	if (speed == STATUS_SPEED_100) {
   17056 		/* Set inband Tx timeout to 5x10us for 100Half */
   17057 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17058 
   17059 		/* Do not extend the K1 entry latency for 100Half */
   17060 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17061 	} else {
   17062 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17063 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17064 
   17065 		/* Extend the K1 entry latency for 10 Mbps */
   17066 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17067 	}
   17068 
   17069 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17070 
   17071 update_fextnvm6:
   17072 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17073 	return 0;
   17074 }
   17075 
   17076 /*
   17077  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17078  *  @sc:   pointer to the HW structure
   17079  *  @link: link up bool flag
   17080  *
   17081  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17083  *  If link is down, the function will restore the default K1 setting located
   17084  *  in the NVM.
   17085  */
   17086 static int
   17087 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17088 {
   17089 	int k1_enable = sc->sc_nvm_k1_enabled;
   17090 	int rv;
   17091 
   17092 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17093 		device_xname(sc->sc_dev), __func__));
   17094 
   17095 	rv = sc->phy.acquire(sc);
   17096 	if (rv != 0)
   17097 		return rv;
   17098 
   17099 	if (link) {
   17100 		k1_enable = 0;
   17101 
   17102 		/* Link stall fix for link up */
   17103 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17104 		    0x0100);
   17105 	} else {
   17106 		/* Link stall fix for link down */
   17107 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17108 		    0x4100);
   17109 	}
   17110 
   17111 	wm_configure_k1_ich8lan(sc, k1_enable);
   17112 	sc->phy.release(sc);
   17113 
   17114 	return 0;
   17115 }
   17116 
   17117 /*
   17118  *  wm_k1_workaround_lv - K1 Si workaround
   17119  *  @sc:   pointer to the HW structure
   17120  *
   17121  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   17122  *  Disable K1 for 1000 and 100 speeds
   17123  */
   17124 static int
   17125 wm_k1_workaround_lv(struct wm_softc *sc)
   17126 {
   17127 	uint32_t reg;
   17128 	uint16_t phyreg;
   17129 	int rv;
   17130 
   17131 	if (sc->sc_type != WM_T_PCH2)
   17132 		return 0;
   17133 
   17134 	/* Set K1 beacon duration based on 10Mbps speed */
   17135 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17136 	if (rv != 0)
   17137 		return rv;
   17138 
   17139 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17140 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17141 		if (phyreg &
   17142 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   17143 			/* LV 1G/100 Packet drop issue wa  */
   17144 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17145 			    &phyreg);
   17146 			if (rv != 0)
   17147 				return rv;
   17148 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17149 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17150 			    phyreg);
   17151 			if (rv != 0)
   17152 				return rv;
   17153 		} else {
   17154 			/* For 10Mbps */
   17155 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17156 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17157 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17158 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17159 		}
   17160 	}
   17161 
   17162 	return 0;
   17163 }
   17164 
   17165 /*
   17166  *  wm_link_stall_workaround_hv - Si workaround
   17167  *  @sc: pointer to the HW structure
   17168  *
   17169  *  This function works around a Si bug where the link partner can get
 *  a link up indication before the PHY does.  If small packets are sent
 *  by the link partner, they can be placed in the packet buffer without
 *  being properly accounted for by the PHY and will stall, preventing
   17173  *  further packets from being received.  The workaround is to clear the
   17174  *  packet buffer after the PHY detects link up.
   17175  */
   17176 static int
   17177 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17178 {
   17179 	uint16_t phyreg;
   17180 
   17181 	if (sc->sc_phytype != WMPHY_82578)
   17182 		return 0;
   17183 
	/* Do not apply workaround if PHY loopback (BMCR bit 14) is set */
   17185 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17186 	if ((phyreg & BMCR_LOOP) != 0)
   17187 		return 0;
   17188 
   17189 	/* Check if link is up and at 1Gbps */
   17190 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17191 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17192 	    | BM_CS_STATUS_SPEED_MASK;
   17193 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17194 		| BM_CS_STATUS_SPEED_1000))
   17195 		return 0;
   17196 
   17197 	delay(200 * 1000);	/* XXX too big */
   17198 
   17199 	/* Flush the packets in the fifo buffer */
   17200 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17201 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17202 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17203 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17204 
   17205 	return 0;
   17206 }
   17207 
   17208 static int
   17209 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17210 {
   17211 	int rv;
   17212 
   17213 	rv = sc->phy.acquire(sc);
   17214 	if (rv != 0) {
   17215 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17216 		    __func__);
   17217 		return rv;
   17218 	}
   17219 
   17220 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17221 
   17222 	sc->phy.release(sc);
   17223 
   17224 	return rv;
   17225 }
   17226 
   17227 static int
   17228 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17229 {
   17230 	int rv;
   17231 	uint16_t reg;
   17232 
   17233 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17234 	if (rv != 0)
   17235 		return rv;
   17236 
   17237 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17238 	    reg | HV_KMRN_MDIO_SLOW);
   17239 }
   17240 
   17241 /*
   17242  *  wm_configure_k1_ich8lan - Configure K1 power state
   17243  *  @sc: pointer to the HW structure
   17244  *  @enable: K1 state to configure
   17245  *
   17246  *  Configure the K1 power state based on the provided parameter.
   17247  *  Assumes semaphore already acquired.
   17248  */
   17249 static void
   17250 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17251 {
   17252 	uint32_t ctrl, ctrl_ext, tmp;
   17253 	uint16_t kmreg;
   17254 	int rv;
   17255 
   17256 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17257 
   17258 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17259 	if (rv != 0)
   17260 		return;
   17261 
   17262 	if (k1_enable)
   17263 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17264 	else
   17265 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17266 
   17267 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17268 	if (rv != 0)
   17269 		return;
   17270 
   17271 	delay(20);
   17272 
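	/*
	 * Briefly force the MAC speed setting (CTRL_FRCSPD with
	 * CTRL_EXT_SPD_BYPS) and then restore the original CTRL and
	 * CTRL_EXT values so the new K1 setting takes effect; other
	 * e1000-derived drivers perform the same dance here.
	 */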
   17273 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17274 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17275 
   17276 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17277 	tmp |= CTRL_FRCSPD;
   17278 
   17279 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17280 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17281 	CSR_WRITE_FLUSH(sc);
   17282 	delay(20);
   17283 
   17284 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17285 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17286 	CSR_WRITE_FLUSH(sc);
   17287 	delay(20);
   17290 }
   17291 
   17292 /* special case - for 82575 - need to do manual init ... */
   17293 static void
   17294 wm_reset_init_script_82575(struct wm_softc *sc)
   17295 {
   17296 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   17299 	 */
   17300 
   17301 	/* SerDes configuration via SERDESCTRL */
   17302 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17303 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17304 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17305 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17306 
   17307 	/* CCM configuration via CCMCTL register */
   17308 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17309 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17310 
   17311 	/* PCIe lanes configuration */
   17312 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17313 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17314 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17315 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17316 
   17317 	/* PCIe PLL Configuration */
   17318 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17319 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17320 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17321 }
   17322 
   17323 static void
   17324 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17325 {
   17326 	uint32_t reg;
   17327 	uint16_t nvmword;
   17328 	int rv;
   17329 
   17330 	if (sc->sc_type != WM_T_82580)
   17331 		return;
   17332 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17333 		return;
   17334 
   17335 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17336 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17337 	if (rv != 0) {
   17338 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17339 		    __func__);
   17340 		return;
   17341 	}
   17342 
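	/*
	 * Mirror the NVM CFG3 word into MDICNFG: DEST routes MDIO to the
	 * external PHY interface and COM_MDIO shares the MDIO interface
	 * across functions (interpretation based on the macro names).
	 */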
   17343 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17344 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17345 		reg |= MDICNFG_DEST;
   17346 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17347 		reg |= MDICNFG_COM_MDIO;
   17348 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17349 }
   17350 
   17351 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17352 
   17353 static bool
   17354 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17355 {
   17356 	uint32_t reg;
   17357 	uint16_t id1, id2;
   17358 	int i, rv;
   17359 
   17360 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17361 		device_xname(sc->sc_dev), __func__));
   17362 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17363 
   17364 	id1 = id2 = 0xffff;
   17365 	for (i = 0; i < 2; i++) {
   17366 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17367 		    &id1);
   17368 		if ((rv != 0) || MII_INVALIDID(id1))
   17369 			continue;
   17370 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17371 		    &id2);
   17372 		if ((rv != 0) || MII_INVALIDID(id2))
   17373 			continue;
   17374 		break;
   17375 	}
   17376 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17377 		goto out;
   17378 
   17379 	/*
   17380 	 * In case the PHY needs to be in mdio slow mode,
   17381 	 * set slow mode and try to get the PHY id again.
   17382 	 */
   17383 	rv = 0;
   17384 	if (sc->sc_type < WM_T_PCH_LPT) {
   17385 		wm_set_mdio_slow_mode_hv_locked(sc);
   17386 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17387 		    &id1);
   17388 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17389 		    &id2);
   17390 	}
   17391 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17392 		device_printf(sc->sc_dev, "XXX return with false\n");
   17393 		return false;
   17394 	}
   17395 out:
   17396 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17397 		/* Only unforce SMBus if ME is not active */
   17398 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17399 			uint16_t phyreg;
   17400 
   17401 			/* Unforce SMBus mode in PHY */
   17402 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17403 			    CV_SMB_CTRL, &phyreg);
   17404 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17405 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17406 			    CV_SMB_CTRL, phyreg);
   17407 
   17408 			/* Unforce SMBus mode in MAC */
   17409 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17410 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17411 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17412 		}
   17413 	}
   17414 	return true;
   17415 }
   17416 
   17417 static void
   17418 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17419 {
   17420 	uint32_t reg;
   17421 	int i;
   17422 
   17423 	/* Set PHY Config Counter to 50msec */
   17424 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17425 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17426 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17427 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17428 
   17429 	/* Toggle LANPHYPC */
   17430 	reg = CSR_READ(sc, WMREG_CTRL);
   17431 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17432 	reg &= ~CTRL_LANPHYPC_VALUE;
   17433 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17434 	CSR_WRITE_FLUSH(sc);
   17435 	delay(1000);
   17436 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17437 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17438 	CSR_WRITE_FLUSH(sc);
   17439 
   17440 	if (sc->sc_type < WM_T_PCH_LPT)
   17441 		delay(50 * 1000);
   17442 	else {
   17443 		i = 20;
   17444 
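		/*
		 * Poll for the LCD power-cycle-done indication
		 * (CTRL_EXT_LPCD) for up to 20 * 5ms before the final
		 * settle delay.
		 */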
   17445 		do {
   17446 			delay(5 * 1000);
   17447 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17448 		    && i--);
   17449 
   17450 		delay(30 * 1000);
   17451 	}
   17452 }
   17453 
   17454 static int
   17455 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17456 {
   17457 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17458 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17459 	uint32_t rxa;
   17460 	uint16_t scale = 0, lat_enc = 0;
   17461 	int32_t obff_hwm = 0;
   17462 	int64_t lat_ns, value;
   17463 
   17464 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17465 		device_xname(sc->sc_dev), __func__));
   17466 
   17467 	if (link) {
   17468 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17469 		uint32_t status;
   17470 		uint16_t speed;
   17471 		pcireg_t preg;
   17472 
   17473 		status = CSR_READ(sc, WMREG_STATUS);
   17474 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17475 		case STATUS_SPEED_10:
   17476 			speed = 10;
   17477 			break;
   17478 		case STATUS_SPEED_100:
   17479 			speed = 100;
   17480 			break;
   17481 		case STATUS_SPEED_1000:
   17482 			speed = 1000;
   17483 			break;
   17484 		default:
   17485 			device_printf(sc->sc_dev, "Unknown speed "
   17486 			    "(status = %08x)\n", status);
   17487 			return -1;
   17488 		}
   17489 
   17490 		/* Rx Packet Buffer Allocation size (KB) */
   17491 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17492 
   17493 		/*
   17494 		 * Determine the maximum latency tolerated by the device.
   17495 		 *
   17496 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17497 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17498 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17499 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17500 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17501 		 */
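		/*
		 * Worked example (illustrative numbers only): with a 24KB
		 * Rx allocation, a 1500-byte MTU and a 1000Mbps link,
		 * lat_ns = (24 * 1024 - 2 * 1514) * 8 * 1000 / 1000
		 * = 172384, which the loop below encodes as scale 2
		 * (2^10 ns units) and value 169.
		 */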
   17502 		lat_ns = ((int64_t)rxa * 1024 -
   17503 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17504 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17505 		if (lat_ns < 0)
   17506 			lat_ns = 0;
   17507 		else
   17508 			lat_ns /= speed;
   17509 		value = lat_ns;
   17510 
   17511 		while (value > LTRV_VALUE) {
			scale++;
   17513 			value = howmany(value, __BIT(5));
   17514 		}
   17515 		if (scale > LTRV_SCALE_MAX) {
   17516 			device_printf(sc->sc_dev,
   17517 			    "Invalid LTR latency scale %d\n", scale);
   17518 			return -1;
   17519 		}
   17520 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17521 
   17522 		/* Determine the maximum latency tolerated by the platform */
   17523 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17524 		    WM_PCI_LTR_CAP_LPT);
   17525 		max_snoop = preg & 0xffff;
   17526 		max_nosnoop = preg >> 16;
   17527 
   17528 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17529 
   17530 		if (lat_enc > max_ltr_enc) {
   17531 			lat_enc = max_ltr_enc;
   17532 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17533 			    * PCI_LTR_SCALETONS(
   17534 				    __SHIFTOUT(lat_enc,
   17535 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17536 		}
   17537 
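		/*
		 * Convert the latency back into (roughly) KB of buffer
		 * that can drain at the link rate within that time; the
		 * OBFF high water mark is whatever Rx buffer space
		 * remains.
		 */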
   17538 		if (lat_ns) {
   17539 			lat_ns *= speed * 1000;
   17540 			lat_ns /= 8;
   17541 			lat_ns /= 1000000000;
   17542 			obff_hwm = (int32_t)(rxa - lat_ns);
   17543 		}
   17544 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
   17546 			    "(rxa = %d, lat_ns = %d)\n",
   17547 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17548 			return -1;
   17549 		}
   17550 	}
   17551 	/* Snoop and No-Snoop latencies the same */
   17552 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17553 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17554 
   17555 	/* Set OBFF high water mark */
   17556 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17557 	reg |= obff_hwm;
   17558 	CSR_WRITE(sc, WMREG_SVT, reg);
   17559 
   17560 	/* Enable OBFF */
   17561 	reg = CSR_READ(sc, WMREG_SVCR);
   17562 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17563 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17564 
   17565 	return 0;
   17566 }
   17567 
   17568 /*
   17569  * I210 Errata 25 and I211 Errata 10
   17570  * Slow System Clock.
   17571  *
 * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
   17573  */
   17574 static int
   17575 wm_pll_workaround_i210(struct wm_softc *sc)
   17576 {
   17577 	uint32_t mdicnfg, wuc;
   17578 	uint32_t reg;
   17579 	pcireg_t pcireg;
   17580 	uint32_t pmreg;
   17581 	uint16_t nvmword, tmp_nvmword;
   17582 	uint16_t phyval;
   17583 	bool wa_done = false;
   17584 	int i, rv = 0;
   17585 
   17586 	/* Get Power Management cap offset */
   17587 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17588 	    &pmreg, NULL) == 0)
   17589 		return -1;
   17590 
   17591 	/* Save WUC and MDICNFG registers */
   17592 	wuc = CSR_READ(sc, WMREG_WUC);
   17593 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17594 
   17595 	reg = mdicnfg & ~MDICNFG_DEST;
   17596 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17597 
   17598 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17599 		/*
   17600 		 * The default value of the Initialization Control Word 1
17601 		 * is the same for both the I210's FLASH_HW and the
17602 		 * I21[01]'s iNVM.
   17602 		 */
   17603 		nvmword = INVM_DEFAULT_AL;
   17604 	}
   17605 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17606 
   17607 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17608 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17609 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17610 
   17611 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17612 			rv = 0;
   17613 			break; /* OK */
   17614 		} else
   17615 			rv = -1;
   17616 
   17617 		wa_done = true;
   17618 		/* Directly reset the internal PHY */
   17619 		reg = CSR_READ(sc, WMREG_CTRL);
   17620 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17621 
   17622 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17623 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17624 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17625 
   17626 		CSR_WRITE(sc, WMREG_WUC, 0);
   17627 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17628 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17629 
   17630 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17631 		    pmreg + PCI_PMCSR);
   17632 		pcireg |= PCI_PMCSR_STATE_D3;
   17633 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17634 		    pmreg + PCI_PMCSR, pcireg);
   17635 		delay(1000);
   17636 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17637 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17638 		    pmreg + PCI_PMCSR, pcireg);
   17639 
   17640 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17641 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17642 
   17643 		/* Restore WUC register */
   17644 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17645 	}
   17646 
   17647 	/* Restore MDICNFG setting */
   17648 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17649 	if (wa_done)
   17650 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17651 	return rv;
   17652 }
   17653 
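/*
 * The workaround above bounces the device through D3hot and back to D0
 * so that the PHY re-runs autoload with the patched word.  A minimal
 * sketch of that power-state cycle in isolation (the helper name is
 * illustrative; pmreg is the Power Management capability offset found
 * with pci_get_capability()):
 */
#if 0
static void
wm_pm_d3_d0_cycle(pci_chipset_tag_t pc, pcitag_t tag, int pmreg)
{
	pcireg_t csr;

	csr = pci_conf_read(pc, tag, pmreg + PCI_PMCSR);
	pci_conf_write(pc, tag, pmreg + PCI_PMCSR,
	    (csr & ~PCI_PMCSR_STATE_MASK) | PCI_PMCSR_STATE_D3);
	delay(1000);	/* give the device time to settle in D3hot */
	pci_conf_write(pc, tag, pmreg + PCI_PMCSR,
	    (csr & ~PCI_PMCSR_STATE_MASK) | PCI_PMCSR_STATE_D0);
}
#endif
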
   17654 static void
   17655 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17656 {
   17657 	uint32_t reg;
   17658 
   17659 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17660 		device_xname(sc->sc_dev), __func__));
   17661 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17662 	    || (sc->sc_type == WM_T_PCH_CNP));
   17663 
   17664 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17665 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17666 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17667 
   17668 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17669 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17670 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17671 }
   17672 
   17673 /* Sysctl functions */
   17674 static int
   17675 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   17676 {
   17677 	struct sysctlnode node = *rnode;
   17678 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17679 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17680 	struct wm_softc *sc = txq->txq_sc;
   17681 	uint32_t reg;
   17682 
   17683 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   17684 	node.sysctl_data = &reg;
   17685 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17686 }
   17687 
   17688 static int
   17689 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   17690 {
   17691 	struct sysctlnode node = *rnode;
   17692 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17693 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17694 	struct wm_softc *sc = txq->txq_sc;
   17695 	uint32_t reg;
   17696 
   17697 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   17698 	node.sysctl_data = &reg;
   17699 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17700 }
   17701 
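/*
 * The two handlers above only take effect once attached to a sysctl
 * tree.  A sketch of such an attachment, assuming an attach-time
 * context where rnode is the device's parent sysctl node, txq the
 * queue being exported, and sc_sysctllog the per-device sysctl log
 * (node name and description are illustrative):
 */
#if 0
	const struct sysctlnode *cnode;

	if (sysctl_createv(&sc->sc_sysctllog, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT, "txq_tdh",
	    SYSCTL_DESCR("Transmit Descriptor Head"),
	    wm_sysctl_tdh_handler, 0, (void *)txq, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(sc->sc_dev, "unable to create tdh sysctl\n");
#endif
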
   17702 #ifdef WM_DEBUG
   17703 static int
   17704 wm_sysctl_debug(SYSCTLFN_ARGS)
   17705 {
   17706 	struct sysctlnode node = *rnode;
   17707 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17708 	uint32_t dflags;
   17709 	int error;
   17710 
   17711 	dflags = sc->sc_debug;
   17712 	node.sysctl_data = &dflags;
   17713 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17714 
   17715 	if (error || newp == NULL)
   17716 		return error;
   17717 
   17718 	sc->sc_debug = dflags;
   17719 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   17720 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   17721 
   17722 	return 0;
   17723 }
   17724 #endif
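
/*
 * Usage sketch, assuming the debug node ends up under the per-device
 * sysctl subtree (e.g. hw.wm0): writing a new flag mask takes effect
 * immediately, and the handler dumps TARC0/TDT0 as a side effect:
 *
 *	# sysctl -w hw.wm0.debug=0x1
 */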
   17725