/*	$NetBSD: if_wm.c,v 1.704 2021/05/12 10:16:12 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.704 2021/05/12 10:16:12 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
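
/*
 * Illustrative usage (the pattern used at call sites in this file):
 *
 *	DPRINTF(sc, WM_DEBUG_INIT,
 *	    ("%s: %s called\n", device_xname(sc->sc_dev), __func__));
 *
 * The message is printed only when the corresponding bit is set in
 * sc->sc_debug, and the macro compiles away entirely without WM_DEBUG.
 */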

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses: up to 16 per-queue
 * interrupts plus one link-status interrupt.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
 * m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
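
/*
 * Note: the ring and job counts above are powers of two, so the
 * "& (size - 1)" arithmetic is a cheap modular wrap-around.  E.g. with
 * WM_NTXDESC == 4096, WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0.
 */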

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
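
/*
 * Illustrative arithmetic for the comment above: a ~9k jumbo frame
 * spans howmany(9018, MCLBYTES) == 5 2KB clusters, so a 256-entry
 * ring holds on the order of 256 / 5 ~= 50 such packets.
 */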

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)
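
/*
 * For illustration: WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname,
 * EVCNT_TYPE_INTR) formats the counter name "txq00txdw" into
 * (q)->txq_txdw_evcnt_name and attaches (q)->txq_ev_txdw under it.
 */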

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* actual number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* actual number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
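
/*
 * Note that these wrappers tolerate a NULL sc_core_lock (presumably
 * configurations in which the mutex is never created); in that case
 * they are no-ops and WM_CORE_LOCKED() is trivially true.
 */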

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
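
/*
 * The Rx chain macros keep rxq_tailp pointing at the m_next field of
 * the last mbuf (or at rxq_head while the chain is empty), so linking
 * another buffer onto a multi-buffer packet is O(1), with no list walk.
 */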

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif
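
/*
 * Note: the relaxed load/store pair above is not an atomic
 * read-modify-write; it only prevents torn 64-bit reads of ev_count by
 * concurrent readers.  Updates of a given counter are assumed to be
 * serialized by the owning queue's lock.
 */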

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
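
/*
 * CSR_WRITE_FLUSH() reads the STATUS register solely to force any
 * posted (write-buffered) CSR writes out to the device before the
 * caller proceeds; this is the usual PCI flush idiom.
 */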

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
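
/*
 * Worked example of the HI/LO split: with a 64-bit bus_addr_t, a
 * descriptor base address of 0x0000000912345678 is programmed as
 * LO = 0x12345678 and HI = 0x9.  With a 32-bit bus_addr_t the HI half
 * is always 0.
 */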

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds are implemented in the PHY drivers themselves.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1340 	  "Intel i82801H LAN Controller",
   1341 	  WM_T_ICH8,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1343 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1344 	  WM_T_ICH8,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1346 	  "Intel i82801H (M) LAN Controller",
   1347 	  WM_T_ICH8,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1349 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1350 	  WM_T_ICH8,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1352 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1353 	  WM_T_ICH8,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1355 	  "82567V-3 LAN Controller",
   1356 	  WM_T_ICH8,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1358 	  "82801I (AMT) LAN Controller",
   1359 	  WM_T_ICH9,		WMP_F_COPPER },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1361 	  "82801I 10/100 LAN Controller",
   1362 	  WM_T_ICH9,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1364 	  "82801I (G) 10/100 LAN Controller",
   1365 	  WM_T_ICH9,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1367 	  "82801I (GT) 10/100 LAN Controller",
   1368 	  WM_T_ICH9,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1370 	  "82801I (C) LAN Controller",
   1371 	  WM_T_ICH9,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1373 	  "82801I mobile LAN Controller",
   1374 	  WM_T_ICH9,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1376 	  "82801I mobile (V) LAN Controller",
   1377 	  WM_T_ICH9,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1379 	  "82801I mobile (AMT) LAN Controller",
   1380 	  WM_T_ICH9,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1382 	  "82567LM-4 LAN Controller",
   1383 	  WM_T_ICH9,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1385 	  "82567LM-2 LAN Controller",
   1386 	  WM_T_ICH10,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1388 	  "82567LF-2 LAN Controller",
   1389 	  WM_T_ICH10,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1391 	  "82567LM-3 LAN Controller",
   1392 	  WM_T_ICH10,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1394 	  "82567LF-3 LAN Controller",
   1395 	  WM_T_ICH10,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1397 	  "82567V-2 LAN Controller",
   1398 	  WM_T_ICH10,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1400 	  "82567V-3? LAN Controller",
   1401 	  WM_T_ICH10,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1403 	  "HANKSVILLE LAN Controller",
   1404 	  WM_T_ICH10,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1406 	  "PCH LAN (82577LM) Controller",
   1407 	  WM_T_PCH,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1409 	  "PCH LAN (82577LC) Controller",
   1410 	  WM_T_PCH,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1412 	  "PCH LAN (82578DM) Controller",
   1413 	  WM_T_PCH,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1415 	  "PCH LAN (82578DC) Controller",
   1416 	  WM_T_PCH,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1418 	  "PCH2 LAN (82579LM) Controller",
   1419 	  WM_T_PCH2,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1421 	  "PCH2 LAN (82579V) Controller",
   1422 	  WM_T_PCH2,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1424 	  "82575EB dual-1000baseT Ethernet",
   1425 	  WM_T_82575,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1427 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1428 	  WM_T_82575,		WMP_F_SERDES },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1430 	  "82575GB quad-1000baseT Ethernet",
   1431 	  WM_T_82575,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1433 	  "82575GB quad-1000baseT Ethernet (PM)",
   1434 	  WM_T_82575,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1436 	  "82576 1000BaseT Ethernet",
   1437 	  WM_T_82576,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1439 	  "82576 1000BaseX Ethernet",
   1440 	  WM_T_82576,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1443 	  "82576 gigabit Ethernet (SERDES)",
   1444 	  WM_T_82576,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1447 	  "82576 quad-1000BaseT Ethernet",
   1448 	  WM_T_82576,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1451 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1452 	  WM_T_82576,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1455 	  "82576 gigabit Ethernet",
   1456 	  WM_T_82576,		WMP_F_COPPER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1459 	  "82576 gigabit Ethernet (SERDES)",
   1460 	  WM_T_82576,		WMP_F_SERDES },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1462 	  "82576 quad-gigabit Ethernet (SERDES)",
   1463 	  WM_T_82576,		WMP_F_SERDES },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1466 	  "82580 1000BaseT Ethernet",
   1467 	  WM_T_82580,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1469 	  "82580 1000BaseX Ethernet",
   1470 	  WM_T_82580,		WMP_F_FIBER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1473 	  "82580 1000BaseT Ethernet (SERDES)",
   1474 	  WM_T_82580,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1477 	  "82580 gigabit Ethernet (SGMII)",
   1478 	  WM_T_82580,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1480 	  "82580 dual-1000BaseT Ethernet",
   1481 	  WM_T_82580,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1484 	  "82580 quad-1000BaseX Ethernet",
   1485 	  WM_T_82580,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1488 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1489 	  WM_T_82580,		WMP_F_COPPER },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1492 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1493 	  WM_T_82580,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1496 	  "DH89XXCC 1000BASE-KX Ethernet",
   1497 	  WM_T_82580,		WMP_F_SERDES },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1500 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1501 	  WM_T_82580,		WMP_F_SERDES },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1504 	  "I350 Gigabit Network Connection",
   1505 	  WM_T_I350,		WMP_F_COPPER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1508 	  "I350 Gigabit Fiber Network Connection",
   1509 	  WM_T_I350,		WMP_F_FIBER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1512 	  "I350 Gigabit Backplane Connection",
   1513 	  WM_T_I350,		WMP_F_SERDES },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1516 	  "I350 Quad Port Gigabit Ethernet",
   1517 	  WM_T_I350,		WMP_F_SERDES },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1520 	  "I350 Gigabit Connection",
   1521 	  WM_T_I350,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1524 	  "I354 Gigabit Ethernet (KX)",
   1525 	  WM_T_I354,		WMP_F_SERDES },
   1526 
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1528 	  "I354 Gigabit Ethernet (SGMII)",
   1529 	  WM_T_I354,		WMP_F_COPPER },
   1530 
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1532 	  "I354 Gigabit Ethernet (2.5G)",
   1533 	  WM_T_I354,		WMP_F_COPPER },
   1534 
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1536 	  "I210-T1 Ethernet Server Adapter",
   1537 	  WM_T_I210,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1540 	  "I210 Ethernet (Copper OEM)",
   1541 	  WM_T_I210,		WMP_F_COPPER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1544 	  "I210 Ethernet (Copper IT)",
   1545 	  WM_T_I210,		WMP_F_COPPER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1548 	  "I210 Ethernet (Copper, FLASH less)",
   1549 	  WM_T_I210,		WMP_F_COPPER },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1552 	  "I210 Gigabit Ethernet (Fiber)",
   1553 	  WM_T_I210,		WMP_F_FIBER },
   1554 
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1556 	  "I210 Gigabit Ethernet (SERDES)",
   1557 	  WM_T_I210,		WMP_F_SERDES },
   1558 
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1560 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1561 	  WM_T_I210,		WMP_F_SERDES },
   1562 
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1564 	  "I210 Gigabit Ethernet (SGMII)",
   1565 	  WM_T_I210,		WMP_F_COPPER },
   1566 
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1568 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1569 	  WM_T_I210,		WMP_F_COPPER },
   1570 
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1572 	  "I211 Ethernet (COPPER)",
   1573 	  WM_T_I211,		WMP_F_COPPER },
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1575 	  "I217 V Ethernet Connection",
   1576 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1577 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1578 	  "I217 LM Ethernet Connection",
   1579 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1581 	  "I218 V Ethernet Connection",
   1582 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1584 	  "I218 V Ethernet Connection",
   1585 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1587 	  "I218 V Ethernet Connection",
   1588 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1589 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1590 	  "I218 LM Ethernet Connection",
   1591 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1593 	  "I218 LM Ethernet Connection",
   1594 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1596 	  "I218 LM Ethernet Connection",
   1597 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1599 	  "I219 LM Ethernet Connection",
   1600 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1602 	  "I219 LM Ethernet Connection",
   1603 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1605 	  "I219 LM Ethernet Connection",
   1606 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1608 	  "I219 LM Ethernet Connection",
   1609 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1611 	  "I219 LM Ethernet Connection",
   1612 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1614 	  "I219 LM Ethernet Connection",
   1615 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1617 	  "I219 LM Ethernet Connection",
   1618 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1620 	  "I219 LM Ethernet Connection",
   1621 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1623 	  "I219 LM Ethernet Connection",
   1624 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1626 	  "I219 LM Ethernet Connection",
   1627 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1629 	  "I219 LM Ethernet Connection",
   1630 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1632 	  "I219 LM Ethernet Connection",
   1633 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1635 	  "I219 LM Ethernet Connection",
   1636 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1638 	  "I219 LM Ethernet Connection",
   1639 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1641 	  "I219 LM Ethernet Connection",
   1642 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1644 	  "I219 V Ethernet Connection",
   1645 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1647 	  "I219 V Ethernet Connection",
   1648 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1650 	  "I219 V Ethernet Connection",
   1651 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1653 	  "I219 V Ethernet Connection",
   1654 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1656 	  "I219 V Ethernet Connection",
   1657 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1659 	  "I219 V Ethernet Connection",
   1660 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1662 	  "I219 V Ethernet Connection",
   1663 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1665 	  "I219 V Ethernet Connection",
   1666 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1668 	  "I219 V Ethernet Connection",
   1669 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1671 	  "I219 V Ethernet Connection",
   1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1674 	  "I219 V Ethernet Connection",
   1675 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1677 	  "I219 V Ethernet Connection",
   1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1680 	  "I219 V Ethernet Connection",
   1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1682 	{ 0,			0,
   1683 	  NULL,
   1684 	  0,			0 },
   1685 };
   1686 
   1687 /*
   1688  * Register read/write functions.
   1689  * Other than CSR_{READ|WRITE}().
   1690  */
   1691 
   1692 #if 0 /* Not currently used */
   1693 static inline uint32_t
   1694 wm_io_read(struct wm_softc *sc, int reg)
   1695 {
   1696 
   1697 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1698 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1699 }
   1700 #endif
   1701 
   1702 static inline void
   1703 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1704 {
   1705 
   1706 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1707 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1708 }
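
/*
 * A minimal usage sketch (illustrative only; nothing here calls it
 * this way): the I/O window is an address/data register pair, so a
 * CSR write through I/O space is simply
 *
 *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
 *
 * which latches the CSR offset at I/O offset 0 and the value at I/O
 * offset 4.
 */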
   1709 
   1710 static inline void
   1711 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1712     uint32_t data)
   1713 {
   1714 	uint32_t regval;
   1715 	int i;
   1716 
   1717 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1718 
   1719 	CSR_WRITE(sc, reg, regval);
   1720 
   1721 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1722 		delay(5);
   1723 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1724 			break;
   1725 	}
   1726 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1727 		aprint_error("%s: WARNING:"
   1728 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1729 		    device_xname(sc->sc_dev), reg);
   1730 	}
   1731 }
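
/*
 * Note on the helper above: the 8-bit payload goes in the low bits
 * (SCTL_CTL_DATA_MASK) and the target offset in the address field
 * (SCTL_CTL_ADDR_SHIFT) of a single CSR write; the hardware then
 * acknowledges completion with SCTL_CTL_READY, so the poll above is
 * bounded at roughly SCTL_CTL_POLL_TIMEOUT * 5 microseconds.
 */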
   1732 
   1733 static inline void
   1734 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1735 {
   1736 	wa->wa_low = htole32(v & 0xffffffffU);
   1737 	if (sizeof(bus_addr_t) == 8)
   1738 		wa->wa_high = htole32((uint64_t) v >> 32);
   1739 	else
   1740 		wa->wa_high = 0;
   1741 }
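
/*
 * Worked example for the split above (values illustrative): a 36-bit
 * bus address 0x123456780 is stored as wa_low = 0x23456780 and
 * wa_high = 0x1, both in little-endian (hardware) byte order.
 */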
   1742 
   1743 /*
   1744  * Descriptor sync/init functions.
   1745  */
   1746 static inline void
   1747 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1748 {
   1749 	struct wm_softc *sc = txq->txq_sc;
   1750 
   1751 	/* If it will wrap around, sync to the end of the ring. */
   1752 	if ((start + num) > WM_NTXDESC(txq)) {
   1753 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1754 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1755 		    (WM_NTXDESC(txq) - start), ops);
   1756 		num -= (WM_NTXDESC(txq) - start);
   1757 		start = 0;
   1758 	}
   1759 
   1760 	/* Now sync whatever is left. */
   1761 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1762 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1763 }
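
/*
 * Worked example for the wrap case above (numbers illustrative): on a
 * 256-descriptor ring, start = 250 and num = 10 first sync the six
 * descriptors 250..255, then wrap and sync descriptors 0..3 with the
 * second bus_dmamap_sync() call.
 */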
   1764 
   1765 static inline void
   1766 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1767 {
   1768 	struct wm_softc *sc = rxq->rxq_sc;
   1769 
   1770 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1771 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1772 }
   1773 
   1774 static inline void
   1775 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1776 {
   1777 	struct wm_softc *sc = rxq->rxq_sc;
   1778 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1779 	struct mbuf *m = rxs->rxs_mbuf;
   1780 
   1781 	/*
   1782 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1783 	 * so that the payload after the Ethernet header is aligned
   1784 	 * to a 4-byte boundary.
    1785 	 *
   1786 	 * XXX BRAINDAMAGE ALERT!
   1787 	 * The stupid chip uses the same size for every buffer, which
   1788 	 * is set in the Receive Control register.  We are using the 2K
   1789 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1790 	 * reason, we can't "scoot" packets longer than the standard
   1791 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1792 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1793 	 * the upper layer copy the headers.
   1794 	 */
   1795 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1796 
   1797 	if (sc->sc_type == WM_T_82574) {
   1798 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1799 		rxd->erx_data.erxd_addr =
   1800 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1801 		rxd->erx_data.erxd_dd = 0;
   1802 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1803 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1804 
   1805 		rxd->nqrx_data.nrxd_paddr =
   1806 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1807 		/* Currently, split header is not supported. */
   1808 		rxd->nqrx_data.nrxd_haddr = 0;
   1809 	} else {
   1810 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1811 
   1812 		wm_set_dma_addr(&rxd->wrx_addr,
   1813 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1814 		rxd->wrx_len = 0;
   1815 		rxd->wrx_cksum = 0;
   1816 		rxd->wrx_status = 0;
   1817 		rxd->wrx_errors = 0;
   1818 		rxd->wrx_special = 0;
   1819 	}
   1820 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1821 
   1822 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1823 }
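
/*
 * A sketch of the trade-off described in the comment above (assumed
 * logic and a hypothetical max_frame_len, not the driver's actual
 * assignment point): on a strict-alignment machine the tweak would be
 * chosen roughly as
 *
 *	sc->sc_align_tweak = (max_frame_len > 2048 - 2) ? 0 : 2;
 *
 * trading payload alignment for frames that still fit the fixed 2K
 * receive buffer.
 */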
   1824 
   1825 /*
   1826  * Device driver interface functions and commonly used functions.
   1827  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1828  */
   1829 
    1830 /* Look up the device in the supported product table */
   1831 static const struct wm_product *
   1832 wm_lookup(const struct pci_attach_args *pa)
   1833 {
   1834 	const struct wm_product *wmp;
   1835 
   1836 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1837 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1838 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1839 			return wmp;
   1840 	}
   1841 	return NULL;
   1842 }
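
/*
 * The scan above relies on the all-zero sentinel at the end of
 * wm_products: a NULL wmp_name terminates the walk, so every real
 * entry must keep its name non-NULL.
 */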
   1843 
   1844 /* The match function (ca_match) */
   1845 static int
   1846 wm_match(device_t parent, cfdata_t cf, void *aux)
   1847 {
   1848 	struct pci_attach_args *pa = aux;
   1849 
   1850 	if (wm_lookup(pa) != NULL)
   1851 		return 1;
   1852 
   1853 	return 0;
   1854 }
   1855 
   1856 /* The attach function (ca_attach) */
   1857 static void
   1858 wm_attach(device_t parent, device_t self, void *aux)
   1859 {
   1860 	struct wm_softc *sc = device_private(self);
   1861 	struct pci_attach_args *pa = aux;
   1862 	prop_dictionary_t dict;
   1863 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1864 	pci_chipset_tag_t pc = pa->pa_pc;
   1865 	int counts[PCI_INTR_TYPE_SIZE];
   1866 	pci_intr_type_t max_type;
   1867 	const char *eetype, *xname;
   1868 	bus_space_tag_t memt;
   1869 	bus_space_handle_t memh;
   1870 	bus_size_t memsize;
   1871 	int memh_valid;
   1872 	int i, error;
   1873 	const struct wm_product *wmp;
   1874 	prop_data_t ea;
   1875 	prop_number_t pn;
   1876 	uint8_t enaddr[ETHER_ADDR_LEN];
   1877 	char buf[256];
   1878 	char wqname[MAXCOMLEN];
   1879 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1880 	pcireg_t preg, memtype;
   1881 	uint16_t eeprom_data, apme_mask;
   1882 	bool force_clear_smbi;
   1883 	uint32_t link_mode;
   1884 	uint32_t reg;
   1885 
   1886 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1887 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1888 #endif
   1889 	sc->sc_dev = self;
   1890 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1891 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1892 	sc->sc_core_stopping = false;
   1893 
   1894 	wmp = wm_lookup(pa);
   1895 #ifdef DIAGNOSTIC
   1896 	if (wmp == NULL) {
   1897 		printf("\n");
   1898 		panic("wm_attach: impossible");
   1899 	}
   1900 #endif
   1901 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1902 
   1903 	sc->sc_pc = pa->pa_pc;
   1904 	sc->sc_pcitag = pa->pa_tag;
   1905 
   1906 	if (pci_dma64_available(pa))
   1907 		sc->sc_dmat = pa->pa_dmat64;
   1908 	else
   1909 		sc->sc_dmat = pa->pa_dmat;
   1910 
   1911 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1912 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1913 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1914 
   1915 	sc->sc_type = wmp->wmp_type;
   1916 
   1917 	/* Set default function pointers */
   1918 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1919 	sc->phy.release = sc->nvm.release = wm_put_null;
   1920 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1921 
   1922 	if (sc->sc_type < WM_T_82543) {
   1923 		if (sc->sc_rev < 2) {
   1924 			aprint_error_dev(sc->sc_dev,
   1925 			    "i82542 must be at least rev. 2\n");
   1926 			return;
   1927 		}
   1928 		if (sc->sc_rev < 3)
   1929 			sc->sc_type = WM_T_82542_2_0;
   1930 	}
   1931 
   1932 	/*
   1933 	 * Disable MSI for Errata:
   1934 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1935 	 *
   1936 	 *  82544: Errata 25
   1937 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1938 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1939 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1940 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1941 	 *
   1942 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1943 	 *
   1944 	 *  82571 & 82572: Errata 63
   1945 	 */
   1946 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1947 	    || (sc->sc_type == WM_T_82572))
   1948 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1949 
   1950 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1951 	    || (sc->sc_type == WM_T_82580)
   1952 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1953 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1954 		sc->sc_flags |= WM_F_NEWQUEUE;
   1955 
   1956 	/* Set device properties (mactype) */
   1957 	dict = device_properties(sc->sc_dev);
   1958 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1959 
   1960 	/*
    1961 	 * Map the device.  All devices support memory-mapped access,
   1962 	 * and it is really required for normal operation.
   1963 	 */
   1964 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1965 	switch (memtype) {
   1966 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1967 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1968 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1969 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1970 		break;
   1971 	default:
   1972 		memh_valid = 0;
   1973 		break;
   1974 	}
   1975 
   1976 	if (memh_valid) {
   1977 		sc->sc_st = memt;
   1978 		sc->sc_sh = memh;
   1979 		sc->sc_ss = memsize;
   1980 	} else {
   1981 		aprint_error_dev(sc->sc_dev,
   1982 		    "unable to map device registers\n");
   1983 		return;
   1984 	}
   1985 
   1986 	/*
   1987 	 * In addition, i82544 and later support I/O mapped indirect
   1988 	 * register access.  It is not desirable (nor supported in
   1989 	 * this driver) to use it for normal operation, though it is
   1990 	 * required to work around bugs in some chip versions.
   1991 	 */
   1992 	if (sc->sc_type >= WM_T_82544) {
   1993 		/* First we have to find the I/O BAR. */
   1994 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1995 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1996 			if (memtype == PCI_MAPREG_TYPE_IO)
   1997 				break;
   1998 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1999 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2000 				i += 4;	/* skip high bits, too */
   2001 		}
   2002 		if (i < PCI_MAPREG_END) {
    2003 			/*
    2004 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2005 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2006 			 * That's no problem, because the newer chips don't
    2007 			 * have this bug.
    2008 			 *
    2009 			 * The i8254x apparently doesn't respond when the
    2010 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2011 			 * been configured.
    2012 			 */
   2013 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2014 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2015 				aprint_error_dev(sc->sc_dev,
   2016 				    "WARNING: I/O BAR at zero.\n");
   2017 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2018 					0, &sc->sc_iot, &sc->sc_ioh,
   2019 					NULL, &sc->sc_ios) == 0) {
   2020 				sc->sc_flags |= WM_F_IOH_VALID;
   2021 			} else
   2022 				aprint_error_dev(sc->sc_dev,
   2023 				    "WARNING: unable to map I/O space\n");
   2024 		}
   2025 
   2026 	}
   2027 
   2028 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2029 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2030 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2031 	if (sc->sc_type < WM_T_82542_2_1)
   2032 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2033 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2034 
   2035 	/* Power up chip */
   2036 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2037 	    && error != EOPNOTSUPP) {
   2038 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2039 		return;
   2040 	}
   2041 
   2042 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
    2043 	/*
    2044 	 * To save interrupt resources, don't use MSI-X if we can use
    2045 	 * only one queue.
    2046 	 */
   2047 	if (sc->sc_nqueues > 1) {
   2048 		max_type = PCI_INTR_TYPE_MSIX;
    2049 		/*
    2050 		 * The 82583 has an MSI-X capability in the PCI configuration
    2051 		 * space, but it doesn't actually support it. At least the
    2052 		 * documentation doesn't say anything about MSI-X.
    2053 		 */
   2054 		counts[PCI_INTR_TYPE_MSIX]
   2055 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2056 	} else {
   2057 		max_type = PCI_INTR_TYPE_MSI;
   2058 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2059 	}
   2060 
   2061 	/* Allocation settings */
   2062 	counts[PCI_INTR_TYPE_MSI] = 1;
   2063 	counts[PCI_INTR_TYPE_INTX] = 1;
   2064 	/* overridden by disable flags */
   2065 	if (wm_disable_msi != 0) {
   2066 		counts[PCI_INTR_TYPE_MSI] = 0;
   2067 		if (wm_disable_msix != 0) {
   2068 			max_type = PCI_INTR_TYPE_INTX;
   2069 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2070 		}
   2071 	} else if (wm_disable_msix != 0) {
   2072 		max_type = PCI_INTR_TYPE_MSI;
   2073 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2074 	}
   2075 
   2076 alloc_retry:
   2077 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2078 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2079 		return;
   2080 	}
   2081 
   2082 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2083 		error = wm_setup_msix(sc);
   2084 		if (error) {
   2085 			pci_intr_release(pc, sc->sc_intrs,
   2086 			    counts[PCI_INTR_TYPE_MSIX]);
   2087 
   2088 			/* Setup for MSI: Disable MSI-X */
   2089 			max_type = PCI_INTR_TYPE_MSI;
   2090 			counts[PCI_INTR_TYPE_MSI] = 1;
   2091 			counts[PCI_INTR_TYPE_INTX] = 1;
   2092 			goto alloc_retry;
   2093 		}
   2094 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2095 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2096 		error = wm_setup_legacy(sc);
   2097 		if (error) {
   2098 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2099 			    counts[PCI_INTR_TYPE_MSI]);
   2100 
   2101 			/* The next try is for INTx: Disable MSI */
   2102 			max_type = PCI_INTR_TYPE_INTX;
   2103 			counts[PCI_INTR_TYPE_INTX] = 1;
   2104 			goto alloc_retry;
   2105 		}
   2106 	} else {
   2107 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2108 		error = wm_setup_legacy(sc);
   2109 		if (error) {
   2110 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2111 			    counts[PCI_INTR_TYPE_INTX]);
   2112 			return;
   2113 		}
   2114 	}
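
	/*
	 * At this point interrupt setup has settled through the
	 * MSI-X -> MSI -> INTx ladder above: a failed MSI-X or MSI
	 * setup releases its vectors, lowers max_type one level and
	 * retries at alloc_retry, while an INTx failure aborts the
	 * attach, so reaching here means some interrupt type works.
	 */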
   2115 
   2116 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2117 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2118 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2119 	    WM_WORKQUEUE_FLAGS);
   2120 	if (error) {
   2121 		aprint_error_dev(sc->sc_dev,
   2122 		    "unable to create workqueue\n");
   2123 		goto out;
   2124 	}
   2125 
   2126 	/*
   2127 	 * Check the function ID (unit number of the chip).
   2128 	 */
   2129 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2130 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2131 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2132 	    || (sc->sc_type == WM_T_82580)
   2133 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2134 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2135 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2136 	else
   2137 		sc->sc_funcid = 0;
   2138 
   2139 	/*
   2140 	 * Determine a few things about the bus we're connected to.
   2141 	 */
   2142 	if (sc->sc_type < WM_T_82543) {
   2143 		/* We don't really know the bus characteristics here. */
   2144 		sc->sc_bus_speed = 33;
   2145 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
    2146 		/*
    2147 		 * CSA (Communication Streaming Architecture) is about as
    2148 		 * fast as a 32-bit 66MHz PCI bus.
    2149 		 */
   2150 		sc->sc_flags |= WM_F_CSA;
   2151 		sc->sc_bus_speed = 66;
   2152 		aprint_verbose_dev(sc->sc_dev,
   2153 		    "Communication Streaming Architecture\n");
   2154 		if (sc->sc_type == WM_T_82547) {
   2155 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2156 			callout_setfunc(&sc->sc_txfifo_ch,
   2157 			    wm_82547_txfifo_stall, sc);
   2158 			aprint_verbose_dev(sc->sc_dev,
   2159 			    "using 82547 Tx FIFO stall work-around\n");
   2160 		}
   2161 	} else if (sc->sc_type >= WM_T_82571) {
   2162 		sc->sc_flags |= WM_F_PCIE;
   2163 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2164 		    && (sc->sc_type != WM_T_ICH10)
   2165 		    && (sc->sc_type != WM_T_PCH)
   2166 		    && (sc->sc_type != WM_T_PCH2)
   2167 		    && (sc->sc_type != WM_T_PCH_LPT)
   2168 		    && (sc->sc_type != WM_T_PCH_SPT)
   2169 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2170 			/* ICH* and PCH* have no PCIe capability registers */
   2171 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2172 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2173 				NULL) == 0)
   2174 				aprint_error_dev(sc->sc_dev,
   2175 				    "unable to find PCIe capability\n");
   2176 		}
   2177 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2178 	} else {
   2179 		reg = CSR_READ(sc, WMREG_STATUS);
   2180 		if (reg & STATUS_BUS64)
   2181 			sc->sc_flags |= WM_F_BUS64;
   2182 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2183 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2184 
   2185 			sc->sc_flags |= WM_F_PCIX;
   2186 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2187 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2188 				aprint_error_dev(sc->sc_dev,
   2189 				    "unable to find PCIX capability\n");
   2190 			else if (sc->sc_type != WM_T_82545_3 &&
   2191 				 sc->sc_type != WM_T_82546_3) {
   2192 				/*
   2193 				 * Work around a problem caused by the BIOS
   2194 				 * setting the max memory read byte count
   2195 				 * incorrectly.
   2196 				 */
   2197 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2198 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2199 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2200 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2201 
   2202 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2203 				    PCIX_CMD_BYTECNT_SHIFT;
   2204 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2205 				    PCIX_STATUS_MAXB_SHIFT;
   2206 				if (bytecnt > maxb) {
   2207 					aprint_verbose_dev(sc->sc_dev,
   2208 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2209 					    512 << bytecnt, 512 << maxb);
   2210 					pcix_cmd = (pcix_cmd &
   2211 					    ~PCIX_CMD_BYTECNT_MASK) |
   2212 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2213 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2214 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2215 					    pcix_cmd);
   2216 				}
   2217 			}
   2218 		}
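
		/*
		 * The MMRBC clamp above, by example (illustrative numbers):
		 * if the BIOS left the command register asking for 4096-byte
		 * reads (bytecnt = 3, i.e. 512 << 3) but the bus only allows
		 * 2048 (maxb = 2), the command register is rewritten with
		 * maxb so the device never issues larger reads than the bus
		 * can complete.
		 */
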
   2219 		/*
   2220 		 * The quad port adapter is special; it has a PCIX-PCIX
   2221 		 * bridge on the board, and can run the secondary bus at
   2222 		 * a higher speed.
   2223 		 */
   2224 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2225 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2226 								      : 66;
   2227 		} else if (sc->sc_flags & WM_F_PCIX) {
   2228 			switch (reg & STATUS_PCIXSPD_MASK) {
   2229 			case STATUS_PCIXSPD_50_66:
   2230 				sc->sc_bus_speed = 66;
   2231 				break;
   2232 			case STATUS_PCIXSPD_66_100:
   2233 				sc->sc_bus_speed = 100;
   2234 				break;
   2235 			case STATUS_PCIXSPD_100_133:
   2236 				sc->sc_bus_speed = 133;
   2237 				break;
   2238 			default:
   2239 				aprint_error_dev(sc->sc_dev,
   2240 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2241 				    reg & STATUS_PCIXSPD_MASK);
   2242 				sc->sc_bus_speed = 66;
   2243 				break;
   2244 			}
   2245 		} else
   2246 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2247 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2248 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2249 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2250 	}
   2251 
    2252 	/* Clear interesting stat counters */
   2253 	CSR_READ(sc, WMREG_COLC);
   2254 	CSR_READ(sc, WMREG_RXERRC);
   2255 
   2256 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2257 	    || (sc->sc_type >= WM_T_ICH8))
   2258 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2259 	if (sc->sc_type >= WM_T_ICH8)
   2260 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2261 
    2262 	/* Set up the PHY and NVM access functions and related locking */
   2263 	switch (sc->sc_type) {
   2264 	case WM_T_82542_2_0:
   2265 	case WM_T_82542_2_1:
   2266 	case WM_T_82543:
   2267 	case WM_T_82544:
   2268 		/* Microwire */
   2269 		sc->nvm.read = wm_nvm_read_uwire;
   2270 		sc->sc_nvm_wordsize = 64;
   2271 		sc->sc_nvm_addrbits = 6;
   2272 		break;
   2273 	case WM_T_82540:
   2274 	case WM_T_82545:
   2275 	case WM_T_82545_3:
   2276 	case WM_T_82546:
   2277 	case WM_T_82546_3:
   2278 		/* Microwire */
   2279 		sc->nvm.read = wm_nvm_read_uwire;
   2280 		reg = CSR_READ(sc, WMREG_EECD);
   2281 		if (reg & EECD_EE_SIZE) {
   2282 			sc->sc_nvm_wordsize = 256;
   2283 			sc->sc_nvm_addrbits = 8;
   2284 		} else {
   2285 			sc->sc_nvm_wordsize = 64;
   2286 			sc->sc_nvm_addrbits = 6;
   2287 		}
   2288 		sc->sc_flags |= WM_F_LOCK_EECD;
   2289 		sc->nvm.acquire = wm_get_eecd;
   2290 		sc->nvm.release = wm_put_eecd;
   2291 		break;
   2292 	case WM_T_82541:
   2293 	case WM_T_82541_2:
   2294 	case WM_T_82547:
   2295 	case WM_T_82547_2:
   2296 		reg = CSR_READ(sc, WMREG_EECD);
    2297 		/*
    2298 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2299 		 * 8254[17], so set the flags and functions before calling it.
    2300 		 */
   2301 		sc->sc_flags |= WM_F_LOCK_EECD;
   2302 		sc->nvm.acquire = wm_get_eecd;
   2303 		sc->nvm.release = wm_put_eecd;
   2304 		if (reg & EECD_EE_TYPE) {
   2305 			/* SPI */
   2306 			sc->nvm.read = wm_nvm_read_spi;
   2307 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2308 			wm_nvm_set_addrbits_size_eecd(sc);
   2309 		} else {
   2310 			/* Microwire */
   2311 			sc->nvm.read = wm_nvm_read_uwire;
   2312 			if ((reg & EECD_EE_ABITS) != 0) {
   2313 				sc->sc_nvm_wordsize = 256;
   2314 				sc->sc_nvm_addrbits = 8;
   2315 			} else {
   2316 				sc->sc_nvm_wordsize = 64;
   2317 				sc->sc_nvm_addrbits = 6;
   2318 			}
   2319 		}
   2320 		break;
   2321 	case WM_T_82571:
   2322 	case WM_T_82572:
   2323 		/* SPI */
   2324 		sc->nvm.read = wm_nvm_read_eerd;
    2325 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2326 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2327 		wm_nvm_set_addrbits_size_eecd(sc);
   2328 		sc->phy.acquire = wm_get_swsm_semaphore;
   2329 		sc->phy.release = wm_put_swsm_semaphore;
   2330 		sc->nvm.acquire = wm_get_nvm_82571;
   2331 		sc->nvm.release = wm_put_nvm_82571;
   2332 		break;
   2333 	case WM_T_82573:
   2334 	case WM_T_82574:
   2335 	case WM_T_82583:
   2336 		sc->nvm.read = wm_nvm_read_eerd;
    2337 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2338 		if (sc->sc_type == WM_T_82573) {
   2339 			sc->phy.acquire = wm_get_swsm_semaphore;
   2340 			sc->phy.release = wm_put_swsm_semaphore;
   2341 			sc->nvm.acquire = wm_get_nvm_82571;
   2342 			sc->nvm.release = wm_put_nvm_82571;
   2343 		} else {
   2344 			/* Both PHY and NVM use the same semaphore. */
   2345 			sc->phy.acquire = sc->nvm.acquire
   2346 			    = wm_get_swfwhw_semaphore;
   2347 			sc->phy.release = sc->nvm.release
   2348 			    = wm_put_swfwhw_semaphore;
   2349 		}
   2350 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2351 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2352 			sc->sc_nvm_wordsize = 2048;
   2353 		} else {
   2354 			/* SPI */
   2355 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2356 			wm_nvm_set_addrbits_size_eecd(sc);
   2357 		}
   2358 		break;
   2359 	case WM_T_82575:
   2360 	case WM_T_82576:
   2361 	case WM_T_82580:
   2362 	case WM_T_I350:
   2363 	case WM_T_I354:
   2364 	case WM_T_80003:
   2365 		/* SPI */
   2366 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2367 		wm_nvm_set_addrbits_size_eecd(sc);
   2368 		if ((sc->sc_type == WM_T_80003)
   2369 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2370 			sc->nvm.read = wm_nvm_read_eerd;
   2371 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2372 		} else {
   2373 			sc->nvm.read = wm_nvm_read_spi;
   2374 			sc->sc_flags |= WM_F_LOCK_EECD;
   2375 		}
   2376 		sc->phy.acquire = wm_get_phy_82575;
   2377 		sc->phy.release = wm_put_phy_82575;
   2378 		sc->nvm.acquire = wm_get_nvm_80003;
   2379 		sc->nvm.release = wm_put_nvm_80003;
   2380 		break;
   2381 	case WM_T_ICH8:
   2382 	case WM_T_ICH9:
   2383 	case WM_T_ICH10:
   2384 	case WM_T_PCH:
   2385 	case WM_T_PCH2:
   2386 	case WM_T_PCH_LPT:
   2387 		sc->nvm.read = wm_nvm_read_ich8;
   2388 		/* FLASH */
   2389 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2390 		sc->sc_nvm_wordsize = 2048;
   2391 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2392 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2393 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2394 			aprint_error_dev(sc->sc_dev,
   2395 			    "can't map FLASH registers\n");
   2396 			goto out;
   2397 		}
   2398 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2399 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2400 		    ICH_FLASH_SECTOR_SIZE;
   2401 		sc->sc_ich8_flash_bank_size =
   2402 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2403 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2404 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2405 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
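		/*
		 * Worked example for the GFPREG math above (illustrative
		 * values, assuming a 4KB ICH_FLASH_SECTOR_SIZE): base = 1
		 * and limit = 0x1f give a flash base of 0x1000 bytes and
		 * (0x1f + 1 - 1) = 31 sectors of NVM area, converted to
		 * bytes, then to 16-bit words, then halved across the two
		 * flash banks.
		 */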
   2406 		sc->sc_flashreg_offset = 0;
   2407 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2408 		sc->phy.release = wm_put_swflag_ich8lan;
   2409 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2410 		sc->nvm.release = wm_put_nvm_ich8lan;
   2411 		break;
   2412 	case WM_T_PCH_SPT:
   2413 	case WM_T_PCH_CNP:
   2414 		sc->nvm.read = wm_nvm_read_spt;
   2415 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2416 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2417 		sc->sc_flasht = sc->sc_st;
   2418 		sc->sc_flashh = sc->sc_sh;
   2419 		sc->sc_ich8_flash_base = 0;
   2420 		sc->sc_nvm_wordsize =
   2421 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2422 		    * NVM_SIZE_MULTIPLIER;
    2423 		/* The size is in bytes; we want words */
   2424 		sc->sc_nvm_wordsize /= 2;
   2425 		/* Assume 2 banks */
   2426 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
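		/*
		 * Example (assuming NVM_SIZE_MULTIPLIER is 4096 bytes): a
		 * STRAP field of 7 gives (7 + 1) * 4096 = 32768 bytes, i.e.
		 * 16384 words, of which each of the two assumed banks holds
		 * 8192.
		 */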
   2427 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2428 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2429 		sc->phy.release = wm_put_swflag_ich8lan;
   2430 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2431 		sc->nvm.release = wm_put_nvm_ich8lan;
   2432 		break;
   2433 	case WM_T_I210:
   2434 	case WM_T_I211:
    2435 		/* Allow one clear of the SW semaphore on I210 and newer */
   2436 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2437 		if (wm_nvm_flash_presence_i210(sc)) {
   2438 			sc->nvm.read = wm_nvm_read_eerd;
   2439 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2440 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2441 			wm_nvm_set_addrbits_size_eecd(sc);
   2442 		} else {
   2443 			sc->nvm.read = wm_nvm_read_invm;
   2444 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2445 			sc->sc_nvm_wordsize = INVM_SIZE;
   2446 		}
   2447 		sc->phy.acquire = wm_get_phy_82575;
   2448 		sc->phy.release = wm_put_phy_82575;
   2449 		sc->nvm.acquire = wm_get_nvm_80003;
   2450 		sc->nvm.release = wm_put_nvm_80003;
   2451 		break;
   2452 	default:
   2453 		break;
   2454 	}
   2455 
   2456 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2457 	switch (sc->sc_type) {
   2458 	case WM_T_82571:
   2459 	case WM_T_82572:
   2460 		reg = CSR_READ(sc, WMREG_SWSM2);
   2461 		if ((reg & SWSM2_LOCK) == 0) {
   2462 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2463 			force_clear_smbi = true;
   2464 		} else
   2465 			force_clear_smbi = false;
   2466 		break;
   2467 	case WM_T_82573:
   2468 	case WM_T_82574:
   2469 	case WM_T_82583:
   2470 		force_clear_smbi = true;
   2471 		break;
   2472 	default:
   2473 		force_clear_smbi = false;
   2474 		break;
   2475 	}
   2476 	if (force_clear_smbi) {
   2477 		reg = CSR_READ(sc, WMREG_SWSM);
   2478 		if ((reg & SWSM_SMBI) != 0)
   2479 			aprint_error_dev(sc->sc_dev,
   2480 			    "Please update the Bootagent\n");
   2481 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2482 	}
   2483 
   2484 	/*
    2485 	 * Defer printing the EEPROM type until after verifying the checksum.
   2486 	 * This allows the EEPROM type to be printed correctly in the case
   2487 	 * that no EEPROM is attached.
   2488 	 */
   2489 	/*
   2490 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2491 	 * this for later, so we can fail future reads from the EEPROM.
   2492 	 */
   2493 	if (wm_nvm_validate_checksum(sc)) {
    2494 		/*
    2495 		 * Read it again, because some PCIe parts fail the first
    2496 		 * check due to the link being in a sleep state.
    2497 		 */
   2498 		if (wm_nvm_validate_checksum(sc))
   2499 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2500 	}
   2501 
   2502 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2503 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2504 	else {
   2505 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2506 		    sc->sc_nvm_wordsize);
   2507 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2508 			aprint_verbose("iNVM");
   2509 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2510 			aprint_verbose("FLASH(HW)");
   2511 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2512 			aprint_verbose("FLASH");
   2513 		else {
   2514 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2515 				eetype = "SPI";
   2516 			else
   2517 				eetype = "MicroWire";
   2518 			aprint_verbose("(%d address bits) %s EEPROM",
   2519 			    sc->sc_nvm_addrbits, eetype);
   2520 		}
   2521 	}
   2522 	wm_nvm_version(sc);
   2523 	aprint_verbose("\n");
   2524 
    2525 	/*
    2526 	 * XXX This is the first call of wm_gmii_setup_phytype(); the result
    2527 	 * might be incorrect.
    2528 	 */
   2529 	wm_gmii_setup_phytype(sc, 0, 0);
   2530 
   2531 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2532 	switch (sc->sc_type) {
   2533 	case WM_T_ICH8:
   2534 	case WM_T_ICH9:
   2535 	case WM_T_ICH10:
   2536 	case WM_T_PCH:
   2537 	case WM_T_PCH2:
   2538 	case WM_T_PCH_LPT:
   2539 	case WM_T_PCH_SPT:
   2540 	case WM_T_PCH_CNP:
   2541 		apme_mask = WUC_APME;
   2542 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2543 		if ((eeprom_data & apme_mask) != 0)
   2544 			sc->sc_flags |= WM_F_WOL;
   2545 		break;
   2546 	default:
   2547 		break;
   2548 	}
   2549 
   2550 	/* Reset the chip to a known state. */
   2551 	wm_reset(sc);
   2552 
   2553 	/*
   2554 	 * Check for I21[01] PLL workaround.
   2555 	 *
   2556 	 * Three cases:
   2557 	 * a) Chip is I211.
   2558 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2559 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2560 	 */
   2561 	if (sc->sc_type == WM_T_I211)
   2562 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2563 	if (sc->sc_type == WM_T_I210) {
   2564 		if (!wm_nvm_flash_presence_i210(sc))
   2565 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2566 		else if ((sc->sc_nvm_ver_major < 3)
   2567 		    || ((sc->sc_nvm_ver_major == 3)
   2568 			&& (sc->sc_nvm_ver_minor < 25))) {
   2569 			aprint_verbose_dev(sc->sc_dev,
   2570 			    "ROM image version %d.%d is older than 3.25\n",
   2571 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2572 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2573 		}
   2574 	}
   2575 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2576 		wm_pll_workaround_i210(sc);
   2577 
   2578 	wm_get_wakeup(sc);
   2579 
   2580 	/* Non-AMT based hardware can now take control from firmware */
   2581 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2582 		wm_get_hw_control(sc);
   2583 
    2584 	/*
    2585 	 * Read the Ethernet address from the EEPROM, unless it was
    2586 	 * already found in the device properties.
    2587 	 */
   2588 	ea = prop_dictionary_get(dict, "mac-address");
   2589 	if (ea != NULL) {
   2590 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2591 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2592 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2593 	} else {
   2594 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2595 			aprint_error_dev(sc->sc_dev,
   2596 			    "unable to read Ethernet address\n");
   2597 			goto out;
   2598 		}
   2599 	}
   2600 
   2601 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2602 	    ether_sprintf(enaddr));
   2603 
   2604 	/*
   2605 	 * Read the config info from the EEPROM, and set up various
   2606 	 * bits in the control registers based on their contents.
   2607 	 */
   2608 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2609 	if (pn != NULL) {
   2610 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2611 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2612 	} else {
   2613 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2614 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2615 			goto out;
   2616 		}
   2617 	}
   2618 
   2619 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2620 	if (pn != NULL) {
   2621 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2622 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2623 	} else {
   2624 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2625 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2626 			goto out;
   2627 		}
   2628 	}
   2629 
    2630 	/* Check for WM_F_WOL */
   2631 	switch (sc->sc_type) {
   2632 	case WM_T_82542_2_0:
   2633 	case WM_T_82542_2_1:
   2634 	case WM_T_82543:
   2635 		/* dummy? */
   2636 		eeprom_data = 0;
   2637 		apme_mask = NVM_CFG3_APME;
   2638 		break;
   2639 	case WM_T_82544:
   2640 		apme_mask = NVM_CFG2_82544_APM_EN;
   2641 		eeprom_data = cfg2;
   2642 		break;
   2643 	case WM_T_82546:
   2644 	case WM_T_82546_3:
   2645 	case WM_T_82571:
   2646 	case WM_T_82572:
   2647 	case WM_T_82573:
   2648 	case WM_T_82574:
   2649 	case WM_T_82583:
   2650 	case WM_T_80003:
   2651 	case WM_T_82575:
   2652 	case WM_T_82576:
   2653 		apme_mask = NVM_CFG3_APME;
   2654 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2655 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2656 		break;
   2657 	case WM_T_82580:
   2658 	case WM_T_I350:
   2659 	case WM_T_I354:
   2660 	case WM_T_I210:
   2661 	case WM_T_I211:
   2662 		apme_mask = NVM_CFG3_APME;
   2663 		wm_nvm_read(sc,
   2664 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2665 		    1, &eeprom_data);
   2666 		break;
   2667 	case WM_T_ICH8:
   2668 	case WM_T_ICH9:
   2669 	case WM_T_ICH10:
   2670 	case WM_T_PCH:
   2671 	case WM_T_PCH2:
   2672 	case WM_T_PCH_LPT:
   2673 	case WM_T_PCH_SPT:
   2674 	case WM_T_PCH_CNP:
    2675 		/* Already checked before wm_reset() */
   2676 		apme_mask = eeprom_data = 0;
   2677 		break;
   2678 	default: /* XXX 82540 */
   2679 		apme_mask = NVM_CFG3_APME;
   2680 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2681 		break;
   2682 	}
    2683 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2684 	if ((eeprom_data & apme_mask) != 0)
   2685 		sc->sc_flags |= WM_F_WOL;
   2686 
    2687 	/*
    2688 	 * We have the EEPROM settings; now apply the special cases
    2689 	 * where the EEPROM may be wrong or the board doesn't support
    2690 	 * wake-on-LAN on a particular port.
    2691 	 */
   2692 	switch (sc->sc_pcidevid) {
   2693 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2694 		sc->sc_flags &= ~WM_F_WOL;
   2695 		break;
   2696 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2697 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2698 		/* Wake events are only supported on port A of dual-fiber
    2699 		 * adapters, regardless of the EEPROM setting. */
   2700 		if (sc->sc_funcid == 1)
   2701 			sc->sc_flags &= ~WM_F_WOL;
   2702 		break;
   2703 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2704 		/* If quad port adapter, disable WoL on all but port A */
   2705 		if (sc->sc_funcid != 0)
   2706 			sc->sc_flags &= ~WM_F_WOL;
   2707 		break;
   2708 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2709 		/* Wake events are only supported on port A of dual-fiber
    2710 		 * adapters, regardless of the EEPROM setting. */
   2711 		if (sc->sc_funcid == 1)
   2712 			sc->sc_flags &= ~WM_F_WOL;
   2713 		break;
   2714 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2715 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2716 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2717 		/* If quad port adapter, disable WoL on all but port A */
   2718 		if (sc->sc_funcid != 0)
   2719 			sc->sc_flags &= ~WM_F_WOL;
   2720 		break;
   2721 	}
   2722 
   2723 	if (sc->sc_type >= WM_T_82575) {
   2724 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2725 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2726 			    nvmword);
   2727 			if ((sc->sc_type == WM_T_82575) ||
   2728 			    (sc->sc_type == WM_T_82576)) {
   2729 				/* Check NVM for autonegotiation */
   2730 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2731 				    != 0)
   2732 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2733 			}
   2734 			if ((sc->sc_type == WM_T_82575) ||
   2735 			    (sc->sc_type == WM_T_I350)) {
   2736 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2737 					sc->sc_flags |= WM_F_MAS;
   2738 			}
   2739 		}
   2740 	}
   2741 
	/*
	 * XXX need special handling for some multiple port cards
	 * to disable a particular port.
	 */
   2746 
   2747 	if (sc->sc_type >= WM_T_82544) {
   2748 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2749 		if (pn != NULL) {
   2750 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2751 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2752 		} else {
   2753 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2754 				aprint_error_dev(sc->sc_dev,
   2755 				    "unable to read SWDPIN\n");
   2756 				goto out;
   2757 			}
   2758 		}
   2759 	}
   2760 
   2761 	if (cfg1 & NVM_CFG1_ILOS)
   2762 		sc->sc_ctrl |= CTRL_ILOS;
   2763 
	/*
	 * XXX
	 * This code isn't correct because pins 2 and 3 are located
	 * at different positions on newer chips. Check all datasheets.
	 *
	 * Until this problem is resolved, apply this only to chips up
	 * to the 82580.
	 */
   2771 	if (sc->sc_type <= WM_T_82580) {
   2772 		if (sc->sc_type >= WM_T_82544) {
   2773 			sc->sc_ctrl |=
   2774 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2775 			    CTRL_SWDPIO_SHIFT;
   2776 			sc->sc_ctrl |=
   2777 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2778 			    CTRL_SWDPINS_SHIFT;
   2779 		} else {
   2780 			sc->sc_ctrl |=
   2781 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2782 			    CTRL_SWDPIO_SHIFT;
   2783 		}
   2784 	}
   2785 
   2786 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2787 		wm_nvm_read(sc,
   2788 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2789 		    1, &nvmword);
   2790 		if (nvmword & NVM_CFG3_ILOS)
   2791 			sc->sc_ctrl |= CTRL_ILOS;
   2792 	}
   2793 
   2794 #if 0
   2795 	if (sc->sc_type >= WM_T_82544) {
   2796 		if (cfg1 & NVM_CFG1_IPS0)
   2797 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2798 		if (cfg1 & NVM_CFG1_IPS1)
   2799 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2800 		sc->sc_ctrl_ext |=
   2801 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2802 		    CTRL_EXT_SWDPIO_SHIFT;
   2803 		sc->sc_ctrl_ext |=
   2804 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2805 		    CTRL_EXT_SWDPINS_SHIFT;
   2806 	} else {
   2807 		sc->sc_ctrl_ext |=
   2808 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2809 		    CTRL_EXT_SWDPIO_SHIFT;
   2810 	}
   2811 #endif
   2812 
   2813 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2814 #if 0
   2815 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2816 #endif
   2817 
   2818 	if (sc->sc_type == WM_T_PCH) {
   2819 		uint16_t val;
   2820 
   2821 		/* Save the NVM K1 bit setting */
   2822 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2823 
   2824 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2825 			sc->sc_nvm_k1_enabled = 1;
   2826 		else
   2827 			sc->sc_nvm_k1_enabled = 0;
   2828 	}
   2829 
	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2831 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2832 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2833 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2834 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2835 	    || sc->sc_type == WM_T_82573
   2836 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2837 		/* Copper only */
   2838 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2842 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2843 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2844 		switch (link_mode) {
   2845 		case CTRL_EXT_LINK_MODE_1000KX:
   2846 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2847 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2848 			break;
   2849 		case CTRL_EXT_LINK_MODE_SGMII:
   2850 			if (wm_sgmii_uses_mdio(sc)) {
   2851 				aprint_normal_dev(sc->sc_dev,
   2852 				    "SGMII(MDIO)\n");
   2853 				sc->sc_flags |= WM_F_SGMII;
   2854 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2855 				break;
   2856 			}
   2857 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2858 			/*FALLTHROUGH*/
   2859 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2860 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2861 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2862 				if (link_mode
   2863 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2864 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2865 					sc->sc_flags |= WM_F_SGMII;
   2866 					aprint_verbose_dev(sc->sc_dev,
   2867 					    "SGMII\n");
   2868 				} else {
   2869 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2870 					aprint_verbose_dev(sc->sc_dev,
   2871 					    "SERDES\n");
   2872 				}
   2873 				break;
   2874 			}
   2875 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2876 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2877 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2878 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2879 				sc->sc_flags |= WM_F_SGMII;
   2880 			}
   2881 			/* Do not change link mode for 100BaseFX */
   2882 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2883 				break;
   2884 
   2885 			/* Change current link mode setting */
   2886 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2887 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2888 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2889 			else
   2890 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2891 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2892 			break;
   2893 		case CTRL_EXT_LINK_MODE_GMII:
   2894 		default:
   2895 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2896 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2897 			break;
   2898 		}
   2899 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
   2905 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2906 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2907 			if (!wm_sgmii_uses_mdio(sc))
   2908 				wm_gmii_setup_phytype(sc, 0, 0);
   2909 			wm_reset_mdicnfg_82580(sc);
   2910 		}
   2911 	} else if (sc->sc_type < WM_T_82543 ||
   2912 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2913 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2914 			aprint_error_dev(sc->sc_dev,
   2915 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2916 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2917 		}
   2918 	} else {
   2919 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2920 			aprint_error_dev(sc->sc_dev,
   2921 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2922 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2923 		}
   2924 	}
   2925 
   2926 	if (sc->sc_type >= WM_T_PCH2)
   2927 		sc->sc_flags |= WM_F_EEE;
   2928 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2929 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2930 		/* XXX: Need special handling for I354. (not yet) */
   2931 		if (sc->sc_type != WM_T_I354)
   2932 			sc->sc_flags |= WM_F_EEE;
   2933 	}
   2934 
   2935 	/*
   2936 	 * The I350 has a bug where it always strips the CRC whether
   2937 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   2938 	 */
   2939 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2940 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2941 		sc->sc_flags |= WM_F_CRC_STRIP;
   2942 
   2943 	/* Set device properties (macflags) */
   2944 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2945 
   2946 	if (sc->sc_flags != 0) {
   2947 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2948 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2949 	}
   2950 
   2951 #ifdef WM_MPSAFE
   2952 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2953 #else
   2954 	sc->sc_core_lock = NULL;
   2955 #endif
   2956 
   2957 	/* Initialize the media structures accordingly. */
   2958 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2959 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2960 	else
   2961 		wm_tbi_mediainit(sc); /* All others */
   2962 
   2963 	ifp = &sc->sc_ethercom.ec_if;
   2964 	xname = device_xname(sc->sc_dev);
   2965 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2966 	ifp->if_softc = sc;
   2967 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2968 #ifdef WM_MPSAFE
   2969 	ifp->if_extflags = IFEF_MPSAFE;
   2970 #endif
   2971 	ifp->if_ioctl = wm_ioctl;
   2972 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2973 		ifp->if_start = wm_nq_start;
		/*
		 * When the number of CPUs is one and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
		 * other for link status changes. In this situation,
		 * wm_nq_transmit() is disadvantageous because of the
		 * wm_select_txqueue() and pcq(9) overhead.
		 */
   2982 		if (wm_is_using_multiqueue(sc))
   2983 			ifp->if_transmit = wm_nq_transmit;
   2984 	} else {
   2985 		ifp->if_start = wm_start;
		/*
		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
		 */
   2989 		if (wm_is_using_multiqueue(sc))
   2990 			ifp->if_transmit = wm_transmit;
   2991 	}
	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   2993 	ifp->if_init = wm_init;
   2994 	ifp->if_stop = wm_stop;
   2995 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2996 	IFQ_SET_READY(&ifp->if_snd);
   2997 
   2998 	/* Check for jumbo frame */
   2999 	switch (sc->sc_type) {
   3000 	case WM_T_82573:
   3001 		/* XXX limited to 9234 if ASPM is disabled */
   3002 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3003 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3004 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3005 		break;
   3006 	case WM_T_82571:
   3007 	case WM_T_82572:
   3008 	case WM_T_82574:
   3009 	case WM_T_82583:
   3010 	case WM_T_82575:
   3011 	case WM_T_82576:
   3012 	case WM_T_82580:
   3013 	case WM_T_I350:
   3014 	case WM_T_I354:
   3015 	case WM_T_I210:
   3016 	case WM_T_I211:
   3017 	case WM_T_80003:
   3018 	case WM_T_ICH9:
   3019 	case WM_T_ICH10:
   3020 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3021 	case WM_T_PCH_LPT:
   3022 	case WM_T_PCH_SPT:
   3023 	case WM_T_PCH_CNP:
   3024 		/* XXX limited to 9234 */
   3025 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3026 		break;
   3027 	case WM_T_PCH:
   3028 		/* XXX limited to 4096 */
   3029 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3030 		break;
   3031 	case WM_T_82542_2_0:
   3032 	case WM_T_82542_2_1:
   3033 	case WM_T_ICH8:
   3034 		/* No support for jumbo frame */
   3035 		break;
   3036 	default:
   3037 		/* ETHER_MAX_LEN_JUMBO */
   3038 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3039 		break;
   3040 	}
   3041 
	/* If we're an i82543 or greater, we can support VLANs. */
   3043 	if (sc->sc_type >= WM_T_82543) {
   3044 		sc->sc_ethercom.ec_capabilities |=
   3045 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3046 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3047 	}
   3048 
   3049 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3050 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3051 
	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
   3056 	if (sc->sc_type >= WM_T_82543) {
   3057 		ifp->if_capabilities |=
   3058 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3059 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3060 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3061 		    IFCAP_CSUM_TCPv6_Tx |
   3062 		    IFCAP_CSUM_UDPv6_Tx;
   3063 	}
   3064 
	/*
	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
   3071 	if (sc->sc_type >= WM_T_82571) {
   3072 		ifp->if_capabilities |=
   3073 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3074 	}
   3075 
	/*
	 * If we're an i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
   3080 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3081 		ifp->if_capabilities |= IFCAP_TSOv4;
   3082 	}
   3083 
   3084 	if (sc->sc_type >= WM_T_82571) {
   3085 		ifp->if_capabilities |= IFCAP_TSOv6;
   3086 	}
   3087 
   3088 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3089 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3090 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3091 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3092 
   3093 	/* Attach the interface. */
   3094 	error = if_initialize(ifp);
   3095 	if (error != 0) {
   3096 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3097 		    error);
   3098 		return; /* Error */
   3099 	}
   3100 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3101 	ether_ifattach(ifp, enaddr);
   3102 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3103 	if_register(ifp);
   3104 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3105 	    RND_FLAG_DEFAULT);
   3106 
   3107 #ifdef WM_EVENT_COUNTERS
   3108 	/* Attach event counters. */
   3109 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3110 	    NULL, xname, "linkintr");
   3111 
   3112 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3113 	    NULL, xname, "tx_xoff");
   3114 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3115 	    NULL, xname, "tx_xon");
   3116 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3117 	    NULL, xname, "rx_xoff");
   3118 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3119 	    NULL, xname, "rx_xon");
   3120 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3121 	    NULL, xname, "rx_macctl");
   3122 #endif /* WM_EVENT_COUNTERS */
   3123 
   3124 	sc->sc_txrx_use_workqueue = false;
   3125 
   3126 	if (wm_phy_need_linkdown_discard(sc))
   3127 		wm_set_linkdown_discard(sc);
   3128 
   3129 	wm_init_sysctls(sc);
   3130 
   3131 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3132 		pmf_class_network_register(self, ifp);
   3133 	else
   3134 		aprint_error_dev(self, "couldn't establish power handler\n");
   3135 
   3136 	sc->sc_flags |= WM_F_ATTACHED;
   3137 out:
   3138 	return;
   3139 }
   3140 
   3141 /* The detach function (ca_detach) */
   3142 static int
   3143 wm_detach(device_t self, int flags __unused)
   3144 {
   3145 	struct wm_softc *sc = device_private(self);
   3146 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3147 	int i;
   3148 
   3149 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3150 		return 0;
   3151 
   3152 	/* Stop the interface. Callouts are stopped in it. */
   3153 	wm_stop(ifp, 1);
   3154 
   3155 	pmf_device_deregister(self);
   3156 
   3157 	sysctl_teardown(&sc->sc_sysctllog);
   3158 
   3159 #ifdef WM_EVENT_COUNTERS
   3160 	evcnt_detach(&sc->sc_ev_linkintr);
   3161 
   3162 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3163 	evcnt_detach(&sc->sc_ev_tx_xon);
   3164 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3165 	evcnt_detach(&sc->sc_ev_rx_xon);
   3166 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3167 #endif /* WM_EVENT_COUNTERS */
   3168 
   3169 	rnd_detach_source(&sc->rnd_source);
   3170 
   3171 	/* Tell the firmware about the release */
   3172 	WM_CORE_LOCK(sc);
   3173 	wm_release_manageability(sc);
   3174 	wm_release_hw_control(sc);
   3175 	wm_enable_wakeup(sc);
   3176 	WM_CORE_UNLOCK(sc);
   3177 
   3178 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3179 
   3180 	ether_ifdetach(ifp);
   3181 	if_detach(ifp);
   3182 	if_percpuq_destroy(sc->sc_ipq);
   3183 
   3184 	/* Delete all remaining media. */
   3185 	ifmedia_fini(&sc->sc_mii.mii_media);
   3186 
   3187 	/* Unload RX dmamaps and free mbufs */
   3188 	for (i = 0; i < sc->sc_nqueues; i++) {
   3189 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3190 		mutex_enter(rxq->rxq_lock);
   3191 		wm_rxdrain(rxq);
   3192 		mutex_exit(rxq->rxq_lock);
   3193 	}
   3194 	/* Must unlock here */
   3195 
   3196 	/* Disestablish the interrupt handler */
   3197 	for (i = 0; i < sc->sc_nintrs; i++) {
   3198 		if (sc->sc_ihs[i] != NULL) {
   3199 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3200 			sc->sc_ihs[i] = NULL;
   3201 		}
   3202 	}
   3203 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3204 
	/* wm_stop() ensures the workqueue is stopped. */
   3206 	workqueue_destroy(sc->sc_queue_wq);
   3207 
   3208 	for (i = 0; i < sc->sc_nqueues; i++)
   3209 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3210 
   3211 	wm_free_txrx_queues(sc);
   3212 
   3213 	/* Unmap the registers */
   3214 	if (sc->sc_ss) {
   3215 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3216 		sc->sc_ss = 0;
   3217 	}
   3218 	if (sc->sc_ios) {
   3219 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3220 		sc->sc_ios = 0;
   3221 	}
   3222 	if (sc->sc_flashs) {
   3223 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3224 		sc->sc_flashs = 0;
   3225 	}
   3226 
   3227 	if (sc->sc_core_lock)
   3228 		mutex_obj_free(sc->sc_core_lock);
   3229 	if (sc->sc_ich_phymtx)
   3230 		mutex_obj_free(sc->sc_ich_phymtx);
   3231 	if (sc->sc_ich_nvmmtx)
   3232 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3233 
   3234 	return 0;
   3235 }
   3236 
   3237 static bool
   3238 wm_suspend(device_t self, const pmf_qual_t *qual)
   3239 {
   3240 	struct wm_softc *sc = device_private(self);
   3241 
   3242 	wm_release_manageability(sc);
   3243 	wm_release_hw_control(sc);
   3244 	wm_enable_wakeup(sc);
   3245 
   3246 	return true;
   3247 }
   3248 
   3249 static bool
   3250 wm_resume(device_t self, const pmf_qual_t *qual)
   3251 {
   3252 	struct wm_softc *sc = device_private(self);
   3253 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3254 	pcireg_t reg;
   3255 	char buf[256];
   3256 
   3257 	reg = CSR_READ(sc, WMREG_WUS);
   3258 	if (reg != 0) {
   3259 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3260 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3261 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3262 	}
   3263 
   3264 	if (sc->sc_type >= WM_T_PCH2)
   3265 		wm_resume_workarounds_pchlan(sc);
   3266 	if ((ifp->if_flags & IFF_UP) == 0) {
   3267 		wm_reset(sc);
   3268 		/* Non-AMT based hardware can now take control from firmware */
   3269 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3270 			wm_get_hw_control(sc);
   3271 		wm_init_manageability(sc);
   3272 	} else {
		/*
		 * We called pmf_class_network_register(), so if_init() is
		 * automatically called when IFF_UP is set. wm_reset(),
		 * wm_get_hw_control() and wm_init_manageability() are called
		 * via wm_init().
		 */
   3279 	}
   3280 
   3281 	return true;
   3282 }
   3283 
   3284 /*
   3285  * wm_watchdog:		[ifnet interface function]
   3286  *
   3287  *	Watchdog timer handler.
   3288  */
   3289 static void
   3290 wm_watchdog(struct ifnet *ifp)
   3291 {
   3292 	int qid;
   3293 	struct wm_softc *sc = ifp->if_softc;
	uint16_t hang_queue = 0; /* Bitmap of hung queues; the max is 82576's 16. */
   3295 
   3296 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3297 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3298 
   3299 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3300 	}
   3301 
	/* If any queue hung up, reset the interface. */
   3303 	if (hang_queue != 0) {
   3304 		(void)wm_init(ifp);
   3305 
		/*
		 * Some upper layer processing, e.g. ALTQ or a single-CPU
		 * system, may still call ifp->if_start() directly.
		 */
   3310 		/* Try to get more packets going. */
   3311 		ifp->if_start(ifp);
   3312 	}
   3313 }
   3314 
   3315 
   3316 static void
   3317 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3318 {
   3319 
   3320 	mutex_enter(txq->txq_lock);
   3321 	if (txq->txq_sending &&
   3322 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3323 		wm_watchdog_txq_locked(ifp, txq, hang);
   3324 
   3325 	mutex_exit(txq->txq_lock);
   3326 }
   3327 
   3328 static void
   3329 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3330     uint16_t *hang)
   3331 {
   3332 	struct wm_softc *sc = ifp->if_softc;
   3333 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3334 
   3335 	KASSERT(mutex_owned(txq->txq_lock));
   3336 
   3337 	/*
   3338 	 * Since we're using delayed interrupts, sweep up
   3339 	 * before we report an error.
   3340 	 */
   3341 	wm_txeof(txq, UINT_MAX);
   3342 
   3343 	if (txq->txq_sending)
   3344 		*hang |= __BIT(wmq->wmq_id);
   3345 
   3346 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3347 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3348 		    device_xname(sc->sc_dev));
   3349 	} else {
   3350 #ifdef WM_DEBUG
   3351 		int i, j;
   3352 		struct wm_txsoft *txs;
   3353 #endif
   3354 		log(LOG_ERR,
   3355 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3356 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3357 		    txq->txq_next);
   3358 		if_statinc(ifp, if_oerrors);
   3359 #ifdef WM_DEBUG
   3360 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3361 		    i = WM_NEXTTXS(txq, i)) {
   3362 			txs = &txq->txq_soft[i];
   3363 			printf("txs %d tx %d -> %d\n",
   3364 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3365 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3366 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3367 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3368 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3369 					printf("\t %#08x%08x\n",
   3370 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3371 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3372 				} else {
   3373 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3374 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3375 					    txq->txq_descs[j].wtx_addr.wa_low);
   3376 					printf("\t %#04x%02x%02x%08x\n",
   3377 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3378 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3379 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3380 					    txq->txq_descs[j].wtx_cmdlen);
   3381 				}
   3382 				if (j == txs->txs_lastdesc)
   3383 					break;
   3384 			}
   3385 		}
   3386 #endif
   3387 	}
   3388 }
   3389 
   3390 /*
   3391  * wm_tick:
   3392  *
   3393  *	One second timer, used to check link status, sweep up
   3394  *	completed transmit jobs, etc.
   3395  */
   3396 static void
   3397 wm_tick(void *arg)
   3398 {
   3399 	struct wm_softc *sc = arg;
   3400 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3401 #ifndef WM_MPSAFE
   3402 	int s = splnet();
   3403 #endif
   3404 
   3405 	WM_CORE_LOCK(sc);
   3406 
   3407 	if (sc->sc_core_stopping) {
   3408 		WM_CORE_UNLOCK(sc);
   3409 #ifndef WM_MPSAFE
   3410 		splx(s);
   3411 #endif
   3412 		return;
   3413 	}
   3414 
   3415 	if (sc->sc_type >= WM_T_82542_2_1) {
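		/*
		 * These statistics registers are clear-on-read, so each
		 * read returns the count accumulated since the last tick.
		 */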
   3416 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3417 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3418 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3419 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3420 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3421 	}
   3422 
   3423 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3424 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3425 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3426 	    + CSR_READ(sc, WMREG_CRCERRS)
   3427 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3428 	    + CSR_READ(sc, WMREG_SYMERRC)
   3429 	    + CSR_READ(sc, WMREG_RXERRC)
   3430 	    + CSR_READ(sc, WMREG_SEC)
   3431 	    + CSR_READ(sc, WMREG_CEXTERR)
   3432 	    + CSR_READ(sc, WMREG_RLEC));
	/*
	 * WMREG_RNBC is incremented when there are no available buffers
	 * in host memory. It does not count dropped packets, because the
	 * Ethernet controller can still receive packets in that case as
	 * long as there is space in the PHY's FIFO.
	 *
	 * If you want to know the number of WMREG_RNBC events, use a
	 * dedicated EVCNT instead of if_iqdrops.
	 */
   3442 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3443 	IF_STAT_PUTREF(ifp);
   3444 
   3445 	if (sc->sc_flags & WM_F_HAS_MII)
   3446 		mii_tick(&sc->sc_mii);
   3447 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3448 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3449 		wm_serdes_tick(sc);
   3450 	else
   3451 		wm_tbi_tick(sc);
   3452 
   3453 	WM_CORE_UNLOCK(sc);
   3454 
   3455 	wm_watchdog(ifp);
   3456 
   3457 	callout_schedule(&sc->sc_tick_ch, hz);
   3458 }
   3459 
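/*
 * wm_ifflags_cb:
 *
 *	Callback for interface flag and ethernet capability changes;
 *	returns ENETRESET when a full reinitialization is required.
 */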
   3460 static int
   3461 wm_ifflags_cb(struct ethercom *ec)
   3462 {
   3463 	struct ifnet *ifp = &ec->ec_if;
   3464 	struct wm_softc *sc = ifp->if_softc;
   3465 	u_short iffchange;
   3466 	int ecchange;
   3467 	bool needreset = false;
   3468 	int rc = 0;
   3469 
   3470 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3471 		device_xname(sc->sc_dev), __func__));
   3472 
   3473 	WM_CORE_LOCK(sc);
   3474 
   3475 	/*
   3476 	 * Check for if_flags.
   3477 	 * Main usage is to prevent linkdown when opening bpf.
   3478 	 */
   3479 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3480 	sc->sc_if_flags = ifp->if_flags;
   3481 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3482 		needreset = true;
   3483 		goto ec;
   3484 	}
   3485 
   3486 	/* iff related updates */
   3487 	if ((iffchange & IFF_PROMISC) != 0)
   3488 		wm_set_filter(sc);
   3489 
   3490 	wm_set_vlan(sc);
   3491 
   3492 ec:
   3493 	/* Check for ec_capenable. */
   3494 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3495 	sc->sc_ec_capenable = ec->ec_capenable;
   3496 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3497 		needreset = true;
   3498 		goto out;
   3499 	}
   3500 
   3501 	/* ec related updates */
   3502 	wm_set_eee(sc);
   3503 
   3504 out:
   3505 	if (needreset)
   3506 		rc = ENETRESET;
   3507 	WM_CORE_UNLOCK(sc);
   3508 
   3509 	return rc;
   3510 }
   3511 
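/*
 * wm_phy_need_linkdown_discard:
 *
 *	Return true for PHYs that require Tx packets to be discarded
 *	while the link is down.
 */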
   3512 static bool
   3513 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3514 {
   3515 
   3516 	switch (sc->sc_phytype) {
   3517 	case WMPHY_82577: /* ihphy */
   3518 	case WMPHY_82578: /* atphy */
   3519 	case WMPHY_82579: /* ihphy */
   3520 	case WMPHY_I217: /* ihphy */
   3521 	case WMPHY_82580: /* ihphy */
   3522 	case WMPHY_I350: /* ihphy */
   3523 		return true;
   3524 	default:
   3525 		return false;
   3526 	}
   3527 }
   3528 
   3529 static void
   3530 wm_set_linkdown_discard(struct wm_softc *sc)
   3531 {
   3532 
   3533 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3534 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3535 
   3536 		mutex_enter(txq->txq_lock);
   3537 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3538 		mutex_exit(txq->txq_lock);
   3539 	}
   3540 }
   3541 
   3542 static void
   3543 wm_clear_linkdown_discard(struct wm_softc *sc)
   3544 {
   3545 
   3546 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3547 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3548 
   3549 		mutex_enter(txq->txq_lock);
   3550 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3551 		mutex_exit(txq->txq_lock);
   3552 	}
   3553 }
   3554 
   3555 /*
   3556  * wm_ioctl:		[ifnet interface function]
   3557  *
   3558  *	Handle control requests from the operator.
   3559  */
   3560 static int
   3561 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3562 {
   3563 	struct wm_softc *sc = ifp->if_softc;
   3564 	struct ifreq *ifr = (struct ifreq *)data;
   3565 	struct ifaddr *ifa = (struct ifaddr *)data;
   3566 	struct sockaddr_dl *sdl;
   3567 	int s, error;
   3568 
   3569 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3570 		device_xname(sc->sc_dev), __func__));
   3571 
   3572 #ifndef WM_MPSAFE
   3573 	s = splnet();
   3574 #endif
   3575 	switch (cmd) {
   3576 	case SIOCSIFMEDIA:
   3577 		WM_CORE_LOCK(sc);
   3578 		/* Flow control requires full-duplex mode. */
   3579 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3580 		    (ifr->ifr_media & IFM_FDX) == 0)
   3581 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3582 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3583 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3584 				/* We can do both TXPAUSE and RXPAUSE. */
   3585 				ifr->ifr_media |=
   3586 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3587 			}
   3588 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3589 		}
   3590 		WM_CORE_UNLOCK(sc);
   3591 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3592 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3593 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
   3594 				wm_set_linkdown_discard(sc);
   3595 			else
   3596 				wm_clear_linkdown_discard(sc);
   3597 		}
   3598 		break;
   3599 	case SIOCINITIFADDR:
   3600 		WM_CORE_LOCK(sc);
   3601 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3602 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3603 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3604 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3605 			/* Unicast address is the first multicast entry */
   3606 			wm_set_filter(sc);
   3607 			error = 0;
   3608 			WM_CORE_UNLOCK(sc);
   3609 			break;
   3610 		}
   3611 		WM_CORE_UNLOCK(sc);
		if (((ifp->if_flags & IFF_UP) == 0) &&
		    wm_phy_need_linkdown_discard(sc))
   3613 			wm_clear_linkdown_discard(sc);
   3614 		/*FALLTHROUGH*/
   3615 	default:
   3616 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
			if (((ifp->if_flags & IFF_UP) == 0) &&
			    ((ifr->ifr_flags & IFF_UP) != 0)) {
				wm_clear_linkdown_discard(sc);
			} else if (((ifp->if_flags & IFF_UP) != 0) &&
			    ((ifr->ifr_flags & IFF_UP) == 0)) {
				wm_set_linkdown_discard(sc);
			}
   3622 		}
   3623 #ifdef WM_MPSAFE
   3624 		s = splnet();
   3625 #endif
   3626 		/* It may call wm_start, so unlock here */
   3627 		error = ether_ioctl(ifp, cmd, data);
   3628 #ifdef WM_MPSAFE
   3629 		splx(s);
   3630 #endif
   3631 		if (error != ENETRESET)
   3632 			break;
   3633 
   3634 		error = 0;
   3635 
   3636 		if (cmd == SIOCSIFCAP)
   3637 			error = (*ifp->if_init)(ifp);
   3638 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3639 			;
   3640 		else if (ifp->if_flags & IFF_RUNNING) {
   3641 			/*
   3642 			 * Multicast list has changed; set the hardware filter
   3643 			 * accordingly.
   3644 			 */
   3645 			WM_CORE_LOCK(sc);
   3646 			wm_set_filter(sc);
   3647 			WM_CORE_UNLOCK(sc);
   3648 		}
   3649 		break;
   3650 	}
   3651 
   3652 #ifndef WM_MPSAFE
   3653 	splx(s);
   3654 #endif
   3655 	return error;
   3656 }
   3657 
   3658 /* MAC address related */
   3659 
/*
 * Get the offset of the MAC address and return it.
 * If an error occurs, use offset 0.
 */
   3664 static uint16_t
   3665 wm_check_alt_mac_addr(struct wm_softc *sc)
   3666 {
   3667 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3668 	uint16_t offset = NVM_OFF_MACADDR;
   3669 
   3670 	/* Try to read alternative MAC address pointer */
   3671 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3672 		return 0;
   3673 
	/* Check whether the pointer is valid. */
   3675 	if ((offset == 0x0000) || (offset == 0xffff))
   3676 		return 0;
   3677 
   3678 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
	/*
	 * Check whether the alternative MAC address is valid.
	 * Some cards have a non-0xffff pointer but don't actually use
	 * an alternative MAC address.
	 *
	 * A usable address must have the group (multicast) bit clear
	 * in the first octet.
	 */
   3686 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3687 		if (((myea[0] & 0xff) & 0x01) == 0)
   3688 			return offset; /* Found */
   3689 
   3690 	/* Not found */
   3691 	return 0;
   3692 }
   3693 
   3694 static int
   3695 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3696 {
   3697 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3698 	uint16_t offset = NVM_OFF_MACADDR;
   3699 	int do_invert = 0;
   3700 
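	/*
	 * Select where the MAC address lives in the NVM: newer chips
	 * keep one copy per LAN function, some chips provide an
	 * alternative MAC address pointer, and on the rest the second
	 * function derives its address by flipping the LSB of port A's.
	 */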
   3701 	switch (sc->sc_type) {
   3702 	case WM_T_82580:
   3703 	case WM_T_I350:
   3704 	case WM_T_I354:
   3705 		/* EEPROM Top Level Partitioning */
   3706 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3707 		break;
   3708 	case WM_T_82571:
   3709 	case WM_T_82575:
   3710 	case WM_T_82576:
   3711 	case WM_T_80003:
   3712 	case WM_T_I210:
   3713 	case WM_T_I211:
   3714 		offset = wm_check_alt_mac_addr(sc);
   3715 		if (offset == 0)
   3716 			if ((sc->sc_funcid & 0x01) == 1)
   3717 				do_invert = 1;
   3718 		break;
   3719 	default:
   3720 		if ((sc->sc_funcid & 0x01) == 1)
   3721 			do_invert = 1;
   3722 		break;
   3723 	}
   3724 
   3725 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3726 		goto bad;
   3727 
   3728 	enaddr[0] = myea[0] & 0xff;
   3729 	enaddr[1] = myea[0] >> 8;
   3730 	enaddr[2] = myea[1] & 0xff;
   3731 	enaddr[3] = myea[1] >> 8;
   3732 	enaddr[4] = myea[2] & 0xff;
   3733 	enaddr[5] = myea[2] >> 8;
   3734 
   3735 	/*
   3736 	 * Toggle the LSB of the MAC address on the second port
   3737 	 * of some dual port cards.
   3738 	 */
   3739 	if (do_invert != 0)
   3740 		enaddr[5] ^= 1;
   3741 
   3742 	return 0;
   3743 
   3744  bad:
   3745 	return -1;
   3746 }
   3747 
/*
 * wm_set_ral:
 *
 *	Set an entry in the receive address list.
 */
   3753 static void
   3754 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3755 {
   3756 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3757 	uint32_t wlock_mac;
   3758 	int rv;
   3759 
   3760 	if (enaddr != NULL) {
   3761 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3762 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3763 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3764 		ral_hi |= RAL_AV;
   3765 	} else {
   3766 		ral_lo = 0;
   3767 		ral_hi = 0;
   3768 	}
   3769 
   3770 	switch (sc->sc_type) {
   3771 	case WM_T_82542_2_0:
   3772 	case WM_T_82542_2_1:
   3773 	case WM_T_82543:
   3774 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3775 		CSR_WRITE_FLUSH(sc);
   3776 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3777 		CSR_WRITE_FLUSH(sc);
   3778 		break;
   3779 	case WM_T_PCH2:
   3780 	case WM_T_PCH_LPT:
   3781 	case WM_T_PCH_SPT:
   3782 	case WM_T_PCH_CNP:
   3783 		if (idx == 0) {
   3784 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3785 			CSR_WRITE_FLUSH(sc);
   3786 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3787 			CSR_WRITE_FLUSH(sc);
   3788 			return;
   3789 		}
   3790 		if (sc->sc_type != WM_T_PCH2) {
   3791 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3792 			    FWSM_WLOCK_MAC);
   3793 			addrl = WMREG_SHRAL(idx - 1);
   3794 			addrh = WMREG_SHRAH(idx - 1);
   3795 		} else {
   3796 			wlock_mac = 0;
   3797 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3798 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3799 		}
   3800 
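		/*
		 * FWSM_WLOCK_MAC limits which shared receive address
		 * registers software may update: zero means all entries
		 * are writable, otherwise only RAR[0] and
		 * SHRA[0..wlock_mac-1] are.
		 */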
   3801 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3802 			rv = wm_get_swflag_ich8lan(sc);
   3803 			if (rv != 0)
   3804 				return;
   3805 			CSR_WRITE(sc, addrl, ral_lo);
   3806 			CSR_WRITE_FLUSH(sc);
   3807 			CSR_WRITE(sc, addrh, ral_hi);
   3808 			CSR_WRITE_FLUSH(sc);
   3809 			wm_put_swflag_ich8lan(sc);
   3810 		}
   3811 
   3812 		break;
   3813 	default:
   3814 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3815 		CSR_WRITE_FLUSH(sc);
   3816 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3817 		CSR_WRITE_FLUSH(sc);
   3818 		break;
   3819 	}
   3820 }
   3821 
   3822 /*
   3823  * wm_mchash:
   3824  *
   3825  *	Compute the hash of the multicast address for the 4096-bit
   3826  *	multicast filter.
   3827  */
   3828 static uint32_t
   3829 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3830 {
   3831 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3832 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3833 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3834 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3835 	uint32_t hash;
   3836 
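	/*
	 * The hash is formed from bytes 4 and 5 of the MAC address; the
	 * bit offset is selected by sc_mchash_type. ICH/PCH variants use
	 * a 10-bit hash (1024-bit table), the others a 12-bit hash
	 * (4096-bit table).
	 */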
   3837 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3838 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3839 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3840 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3841 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3842 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3843 		return (hash & 0x3ff);
   3844 	}
   3845 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3846 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3847 
   3848 	return (hash & 0xfff);
   3849 }
   3850 
/*
 * wm_rar_count:
 *
 *	Return the number of receive address (RAL/RAH) entries for the
 *	chip type.
 */
   3855 static int
   3856 wm_rar_count(struct wm_softc *sc)
   3857 {
   3858 	int size;
   3859 
   3860 	switch (sc->sc_type) {
   3861 	case WM_T_ICH8:
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3863 		break;
   3864 	case WM_T_ICH9:
   3865 	case WM_T_ICH10:
   3866 	case WM_T_PCH:
   3867 		size = WM_RAL_TABSIZE_ICH8;
   3868 		break;
   3869 	case WM_T_PCH2:
   3870 		size = WM_RAL_TABSIZE_PCH2;
   3871 		break;
   3872 	case WM_T_PCH_LPT:
   3873 	case WM_T_PCH_SPT:
   3874 	case WM_T_PCH_CNP:
   3875 		size = WM_RAL_TABSIZE_PCH_LPT;
   3876 		break;
   3877 	case WM_T_82575:
   3878 	case WM_T_I210:
   3879 	case WM_T_I211:
   3880 		size = WM_RAL_TABSIZE_82575;
   3881 		break;
   3882 	case WM_T_82576:
   3883 	case WM_T_82580:
   3884 		size = WM_RAL_TABSIZE_82576;
   3885 		break;
   3886 	case WM_T_I350:
   3887 	case WM_T_I354:
   3888 		size = WM_RAL_TABSIZE_I350;
   3889 		break;
   3890 	default:
   3891 		size = WM_RAL_TABSIZE;
   3892 	}
   3893 
   3894 	return size;
   3895 }
   3896 
   3897 /*
   3898  * wm_set_filter:
   3899  *
   3900  *	Set up the receive filter.
   3901  */
   3902 static void
   3903 wm_set_filter(struct wm_softc *sc)
   3904 {
   3905 	struct ethercom *ec = &sc->sc_ethercom;
   3906 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3907 	struct ether_multi *enm;
   3908 	struct ether_multistep step;
   3909 	bus_addr_t mta_reg;
   3910 	uint32_t hash, reg, bit;
   3911 	int i, size, ralmax, rv;
   3912 
   3913 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3914 		device_xname(sc->sc_dev), __func__));
   3915 
   3916 	if (sc->sc_type >= WM_T_82544)
   3917 		mta_reg = WMREG_CORDOVA_MTA;
   3918 	else
   3919 		mta_reg = WMREG_MTA;
   3920 
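	/*
	 * Rebuild the filter from scratch: RCTL_BAM (accept broadcast),
	 * RCTL_UPE (unicast promiscuous) and RCTL_MPE (multicast
	 * promiscuous) are re-derived below.
	 */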
   3921 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3922 
   3923 	if (ifp->if_flags & IFF_BROADCAST)
   3924 		sc->sc_rctl |= RCTL_BAM;
   3925 	if (ifp->if_flags & IFF_PROMISC) {
   3926 		sc->sc_rctl |= RCTL_UPE;
   3927 		ETHER_LOCK(ec);
   3928 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3929 		ETHER_UNLOCK(ec);
   3930 		goto allmulti;
   3931 	}
   3932 
   3933 	/*
   3934 	 * Set the station address in the first RAL slot, and
   3935 	 * clear the remaining slots.
   3936 	 */
   3937 	size = wm_rar_count(sc);
   3938 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3939 
   3940 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3941 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3942 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3943 		switch (i) {
   3944 		case 0:
   3945 			/* We can use all entries */
   3946 			ralmax = size;
   3947 			break;
   3948 		case 1:
   3949 			/* Only RAR[0] */
   3950 			ralmax = 1;
   3951 			break;
   3952 		default:
   3953 			/* Available SHRA + RAR[0] */
   3954 			ralmax = i + 1;
   3955 		}
   3956 	} else
   3957 		ralmax = size;
   3958 	for (i = 1; i < size; i++) {
   3959 		if (i < ralmax)
   3960 			wm_set_ral(sc, NULL, i);
   3961 	}
   3962 
   3963 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3964 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3965 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3966 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3967 		size = WM_ICH8_MC_TABSIZE;
   3968 	else
   3969 		size = WM_MC_TABSIZE;
   3970 	/* Clear out the multicast table. */
   3971 	for (i = 0; i < size; i++) {
   3972 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3973 		CSR_WRITE_FLUSH(sc);
   3974 	}
   3975 
   3976 	ETHER_LOCK(ec);
   3977 	ETHER_FIRST_MULTI(step, ec, enm);
   3978 	while (enm != NULL) {
   3979 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3980 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3981 			ETHER_UNLOCK(ec);
   3982 			/*
   3983 			 * We must listen to a range of multicast addresses.
   3984 			 * For now, just accept all multicasts, rather than
   3985 			 * trying to set only those filter bits needed to match
   3986 			 * the range.  (At this time, the only use of address
   3987 			 * ranges is for IP multicast routing, for which the
   3988 			 * range is big enough to require all bits set.)
   3989 			 */
   3990 			goto allmulti;
   3991 		}
   3992 
   3993 		hash = wm_mchash(sc, enm->enm_addrlo);
   3994 
   3995 		reg = (hash >> 5);
   3996 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3997 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3998 		    || (sc->sc_type == WM_T_PCH2)
   3999 		    || (sc->sc_type == WM_T_PCH_LPT)
   4000 		    || (sc->sc_type == WM_T_PCH_SPT)
   4001 		    || (sc->sc_type == WM_T_PCH_CNP))
   4002 			reg &= 0x1f;
   4003 		else
   4004 			reg &= 0x7f;
   4005 		bit = hash & 0x1f;
   4006 
   4007 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4008 		hash |= 1U << bit;
   4009 
   4010 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
			/*
			 * 82544 Errata 9: Certain registers cannot be
			 * written with particular alignments in PCI-X bus
			 * operation (FCAH, MTA and VFTA).
			 */
   4016 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4017 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4018 			CSR_WRITE_FLUSH(sc);
   4019 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4020 			CSR_WRITE_FLUSH(sc);
   4021 		} else {
   4022 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4023 			CSR_WRITE_FLUSH(sc);
   4024 		}
   4025 
   4026 		ETHER_NEXT_MULTI(step, enm);
   4027 	}
   4028 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4029 	ETHER_UNLOCK(ec);
   4030 
   4031 	goto setit;
   4032 
   4033  allmulti:
   4034 	sc->sc_rctl |= RCTL_MPE;
   4035 
   4036  setit:
   4037 	if (sc->sc_type >= WM_T_PCH2) {
   4038 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4039 		    && (ifp->if_mtu > ETHERMTU))
   4040 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4041 		else
   4042 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4043 		if (rv != 0)
   4044 			device_printf(sc->sc_dev,
   4045 			    "Failed to do workaround for jumbo frame.\n");
   4046 	}
   4047 
   4048 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4049 }
   4050 
   4051 /* Reset and init related */
   4052 
   4053 static void
   4054 wm_set_vlan(struct wm_softc *sc)
   4055 {
   4056 
   4057 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4058 		device_xname(sc->sc_dev), __func__));
   4059 
   4060 	/* Deal with VLAN enables. */
   4061 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4062 		sc->sc_ctrl |= CTRL_VME;
   4063 	else
   4064 		sc->sc_ctrl &= ~CTRL_VME;
   4065 
   4066 	/* Write the control registers. */
   4067 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4068 }
   4069 
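/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is still the hardware default of
 *	zero, program a non-zero value: 10ms directly in GCR on devices
 *	without the v2 capability, otherwise 16ms via the PCIe DCSR2
 *	config register. Completion timeout resend is disabled in either
 *	case.
 */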
   4070 static void
   4071 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4072 {
   4073 	uint32_t gcr;
   4074 	pcireg_t ctrl2;
   4075 
   4076 	gcr = CSR_READ(sc, WMREG_GCR);
   4077 
   4078 	/* Only take action if timeout value is defaulted to 0 */
   4079 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4080 		goto out;
   4081 
   4082 	if ((gcr & GCR_CAP_VER2) == 0) {
   4083 		gcr |= GCR_CMPL_TMOUT_10MS;
   4084 		goto out;
   4085 	}
   4086 
   4087 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4088 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4089 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4090 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4091 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4092 
   4093 out:
   4094 	/* Disable completion timeout resend */
   4095 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4096 
   4097 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4098 }
   4099 
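/*
 * wm_get_auto_rd_done:
 *
 *	On the chip types that support it, wait up to 10ms for the
 *	EEPROM auto-read to complete after reset, as indicated by
 *	EECD_EE_AUTORD.
 */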
   4100 void
   4101 wm_get_auto_rd_done(struct wm_softc *sc)
   4102 {
   4103 	int i;
   4104 
	/* Wait for eeprom to reload */
   4106 	switch (sc->sc_type) {
   4107 	case WM_T_82571:
   4108 	case WM_T_82572:
   4109 	case WM_T_82573:
   4110 	case WM_T_82574:
   4111 	case WM_T_82583:
   4112 	case WM_T_82575:
   4113 	case WM_T_82576:
   4114 	case WM_T_82580:
   4115 	case WM_T_I350:
   4116 	case WM_T_I354:
   4117 	case WM_T_I210:
   4118 	case WM_T_I211:
   4119 	case WM_T_80003:
   4120 	case WM_T_ICH8:
   4121 	case WM_T_ICH9:
   4122 		for (i = 0; i < 10; i++) {
   4123 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4124 				break;
   4125 			delay(1000);
   4126 		}
   4127 		if (i == 10) {
   4128 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4129 			    "complete\n", device_xname(sc->sc_dev));
   4130 		}
   4131 		break;
   4132 	default:
   4133 		break;
   4134 	}
   4135 }
   4136 
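/*
 * wm_lan_init_done:
 *
 *	Poll the STATUS register until the LAN_INIT_DONE bit is set
 *	(ICH10 and the PCH variants), then clear the bit.
 */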
   4137 void
   4138 wm_lan_init_done(struct wm_softc *sc)
   4139 {
   4140 	uint32_t reg = 0;
   4141 	int i;
   4142 
   4143 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4144 		device_xname(sc->sc_dev), __func__));
   4145 
   4146 	/* Wait for eeprom to reload */
   4147 	switch (sc->sc_type) {
   4148 	case WM_T_ICH10:
   4149 	case WM_T_PCH:
   4150 	case WM_T_PCH2:
   4151 	case WM_T_PCH_LPT:
   4152 	case WM_T_PCH_SPT:
   4153 	case WM_T_PCH_CNP:
   4154 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4155 			reg = CSR_READ(sc, WMREG_STATUS);
   4156 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4157 				break;
   4158 			delay(100);
   4159 		}
   4160 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4161 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4162 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4163 		}
   4164 		break;
   4165 	default:
   4166 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4167 		    __func__);
   4168 		break;
   4169 	}
   4170 
   4171 	reg &= ~STATUS_LAN_INIT_DONE;
   4172 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4173 }
   4174 
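/*
 * wm_get_cfg_done:
 *
 *	Wait until the hardware has finished loading its configuration
 *	after reset; the mechanism differs per chip generation (fixed
 *	delay, EEMNGCTL polling, or LAN_INIT_DONE).
 */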
   4175 void
   4176 wm_get_cfg_done(struct wm_softc *sc)
   4177 {
   4178 	int mask;
   4179 	uint32_t reg;
   4180 	int i;
   4181 
   4182 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4183 		device_xname(sc->sc_dev), __func__));
   4184 
   4185 	/* Wait for eeprom to reload */
   4186 	switch (sc->sc_type) {
   4187 	case WM_T_82542_2_0:
   4188 	case WM_T_82542_2_1:
   4189 		/* null */
   4190 		break;
   4191 	case WM_T_82543:
   4192 	case WM_T_82544:
   4193 	case WM_T_82540:
   4194 	case WM_T_82545:
   4195 	case WM_T_82545_3:
   4196 	case WM_T_82546:
   4197 	case WM_T_82546_3:
   4198 	case WM_T_82541:
   4199 	case WM_T_82541_2:
   4200 	case WM_T_82547:
   4201 	case WM_T_82547_2:
   4202 	case WM_T_82573:
   4203 	case WM_T_82574:
   4204 	case WM_T_82583:
   4205 		/* generic */
   4206 		delay(10*1000);
   4207 		break;
   4208 	case WM_T_80003:
   4209 	case WM_T_82571:
   4210 	case WM_T_82572:
   4211 	case WM_T_82575:
   4212 	case WM_T_82576:
   4213 	case WM_T_82580:
   4214 	case WM_T_I350:
   4215 	case WM_T_I354:
   4216 	case WM_T_I210:
   4217 	case WM_T_I211:
   4218 		if (sc->sc_type == WM_T_82571) {
			/* All functions of the 82571 share the port 0 CFGDONE bit */
   4220 			mask = EEMNGCTL_CFGDONE_0;
   4221 		} else
   4222 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4223 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4224 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4225 				break;
   4226 			delay(1000);
   4227 		}
   4228 		if (i >= WM_PHY_CFG_TIMEOUT)
   4229 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4230 				device_xname(sc->sc_dev), __func__));
   4231 		break;
   4232 	case WM_T_ICH8:
   4233 	case WM_T_ICH9:
   4234 	case WM_T_ICH10:
   4235 	case WM_T_PCH:
   4236 	case WM_T_PCH2:
   4237 	case WM_T_PCH_LPT:
   4238 	case WM_T_PCH_SPT:
   4239 	case WM_T_PCH_CNP:
   4240 		delay(10*1000);
   4241 		if (sc->sc_type >= WM_T_ICH10)
   4242 			wm_lan_init_done(sc);
   4243 		else
   4244 			wm_get_auto_rd_done(sc);
   4245 
   4246 		/* Clear PHY Reset Asserted bit */
   4247 		reg = CSR_READ(sc, WMREG_STATUS);
   4248 		if ((reg & STATUS_PHYRA) != 0)
   4249 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4250 		break;
   4251 	default:
   4252 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4253 		    __func__);
   4254 		break;
   4255 	}
   4256 }
   4257 
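/*
 * wm_phy_post_reset:
 *
 *	Perform the post-PHY-reset configuration required on ICH8 and
 *	newer: chip specific workarounds, clearing the host wakeup bit,
 *	and reconfiguring the LCD from the NVM extended configuration
 *	region and OEM bits.
 */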
   4258 int
   4259 wm_phy_post_reset(struct wm_softc *sc)
   4260 {
   4261 	device_t dev = sc->sc_dev;
   4262 	uint16_t reg;
   4263 	int rv = 0;
   4264 
   4265 	/* This function is only for ICH8 and newer. */
   4266 	if (sc->sc_type < WM_T_ICH8)
   4267 		return 0;
   4268 
   4269 	if (wm_phy_resetisblocked(sc)) {
   4270 		/* XXX */
   4271 		device_printf(dev, "PHY is blocked\n");
   4272 		return -1;
   4273 	}
   4274 
   4275 	/* Allow time for h/w to get to quiescent state after reset */
   4276 	delay(10*1000);
   4277 
   4278 	/* Perform any necessary post-reset workarounds */
   4279 	if (sc->sc_type == WM_T_PCH)
   4280 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4281 	else if (sc->sc_type == WM_T_PCH2)
   4282 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4283 	if (rv != 0)
   4284 		return rv;
   4285 
   4286 	/* Clear the host wakeup bit after lcd reset */
   4287 	if (sc->sc_type >= WM_T_PCH) {
   4288 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4289 		reg &= ~BM_WUC_HOST_WU_BIT;
   4290 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4291 	}
   4292 
   4293 	/* Configure the LCD with the extended configuration region in NVM */
   4294 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4295 		return rv;
   4296 
   4297 	/* Configure the LCD with the OEM bits in NVM */
   4298 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4299 
   4300 	if (sc->sc_type == WM_T_PCH2) {
   4301 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4302 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4303 			delay(10 * 1000);
   4304 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4305 		}
   4306 		/* Set EEE LPI Update Timer to 200usec */
   4307 		rv = sc->phy.acquire(sc);
   4308 		if (rv)
   4309 			return rv;
   4310 		rv = wm_write_emi_reg_locked(dev,
   4311 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4312 		sc->phy.release(sc);
   4313 	}
   4314 
   4315 	return rv;
   4316 }
   4317 
   4318 /* Only for PCH and newer */
   4319 static int
   4320 wm_write_smbus_addr(struct wm_softc *sc)
   4321 {
   4322 	uint32_t strap, freq;
   4323 	uint16_t phy_data;
   4324 	int rv;
   4325 
   4326 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4327 		device_xname(sc->sc_dev), __func__));
   4328 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4329 
   4330 	strap = CSR_READ(sc, WMREG_STRAP);
   4331 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4332 
   4333 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4334 	if (rv != 0)
   4335 		return -1;
   4336 
   4337 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4338 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4339 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4340 
   4341 	if (sc->sc_phytype == WMPHY_I217) {
   4342 		/* Restore SMBus frequency */
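		/*
		 * The STRAP_FREQ field appears to encode the frequency
		 * plus one, so zero means it is unavailable; after the
		 * decrement, bits 0 and 1 select the PHY's low and high
		 * frequency bits.
		 */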
		if (freq--) {
   4344 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4345 			    | HV_SMB_ADDR_FREQ_HIGH);
   4346 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4347 			    HV_SMB_ADDR_FREQ_LOW);
   4348 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4349 			    HV_SMB_ADDR_FREQ_HIGH);
   4350 		} else
   4351 			DPRINTF(sc, WM_DEBUG_INIT,
			    ("%s: %s: Unsupported SMB frequency in PHY\n",
   4353 				device_xname(sc->sc_dev), __func__));
   4354 	}
   4355 
   4356 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4357 	    phy_data);
   4358 }
   4359 
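/*
 * wm_init_lcd_from_nvm:
 *
 *	Configure the LCD (internal PHY) from the extended configuration
 *	region in the NVM, when software rather than hardware is
 *	responsible for doing so.
 */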
   4360 static int
   4361 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4362 {
   4363 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4364 	uint16_t phy_page = 0;
   4365 	int rv = 0;
   4366 
   4367 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4368 		device_xname(sc->sc_dev), __func__));
   4369 
   4370 	switch (sc->sc_type) {
   4371 	case WM_T_ICH8:
   4372 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4373 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4374 			return 0;
   4375 
   4376 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4377 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4378 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4379 			break;
   4380 		}
   4381 		/* FALLTHROUGH */
   4382 	case WM_T_PCH:
   4383 	case WM_T_PCH2:
   4384 	case WM_T_PCH_LPT:
   4385 	case WM_T_PCH_SPT:
   4386 	case WM_T_PCH_CNP:
   4387 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4388 		break;
   4389 	default:
   4390 		return 0;
   4391 	}
   4392 
   4393 	if ((rv = sc->phy.acquire(sc)) != 0)
   4394 		return rv;
   4395 
   4396 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4397 	if ((reg & sw_cfg_mask) == 0)
   4398 		goto release;
   4399 
   4400 	/*
   4401 	 * Make sure HW does not configure LCD from PHY extended configuration
   4402 	 * before SW configuration
   4403 	 */
   4404 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4405 	if ((sc->sc_type < WM_T_PCH2)
   4406 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4407 		goto release;
   4408 
   4409 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4410 		device_xname(sc->sc_dev), __func__));
   4411 	/* word_addr is in DWORD */
   4412 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4413 
   4414 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4415 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4416 	if (cnf_size == 0)
   4417 		goto release;
   4418 
   4419 	if (((sc->sc_type == WM_T_PCH)
   4420 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4421 	    || (sc->sc_type > WM_T_PCH)) {
   4422 		/*
   4423 		 * HW configures the SMBus address and LEDs when the OEM and
   4424 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4425 		 * are cleared, SW will configure them instead.
   4426 		 */
   4427 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4428 			device_xname(sc->sc_dev), __func__));
   4429 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4430 			goto release;
   4431 
   4432 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4433 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4434 		    (uint16_t)reg);
   4435 		if (rv != 0)
   4436 			goto release;
   4437 	}
   4438 
   4439 	/* Configure LCD from extended configuration region. */
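	/*
	 * Each entry in the region is a (data, address) word pair;
	 * writes to IGPHY_PAGE_SELECT update the page applied to
	 * subsequent register addresses.
	 */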
   4440 	for (i = 0; i < cnf_size; i++) {
   4441 		uint16_t reg_data, reg_addr;
   4442 
   4443 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4444 			goto release;
   4445 
    4446 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1,
         		    &reg_addr) != 0)
   4447 			goto release;
   4448 
   4449 		if (reg_addr == IGPHY_PAGE_SELECT)
   4450 			phy_page = reg_data;
   4451 
   4452 		reg_addr &= IGPHY_MAXREGADDR;
   4453 		reg_addr |= phy_page;
   4454 
   4455 		KASSERT(sc->phy.writereg_locked != NULL);
   4456 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4457 		    reg_data);
   4458 	}
   4459 
   4460 release:
   4461 	sc->phy.release(sc);
   4462 	return rv;
   4463 }
   4464 
   4465 /*
   4466  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4467  *  @sc:       pointer to the HW structure
    4468  *  @d0_state: true when entering the D0 state, false when entering D3
   4469  *
   4470  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4471  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4472  *  in the NVM determine whether HW should configure LPLU and Gbe Disable.
   4473  */
   4474 int
   4475 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4476 {
   4477 	uint32_t mac_reg;
   4478 	uint16_t oem_reg;
   4479 	int rv;
   4480 
   4481 	if (sc->sc_type < WM_T_PCH)
   4482 		return 0;
   4483 
   4484 	rv = sc->phy.acquire(sc);
   4485 	if (rv != 0)
   4486 		return rv;
   4487 
   4488 	if (sc->sc_type == WM_T_PCH) {
   4489 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4490 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4491 			goto release;
   4492 	}
   4493 
   4494 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4495 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4496 		goto release;
   4497 
   4498 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4499 
   4500 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4501 	if (rv != 0)
   4502 		goto release;
   4503 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4504 
   4505 	if (d0_state) {
   4506 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4507 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4508 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4509 			oem_reg |= HV_OEM_BITS_LPLU;
   4510 	} else {
   4511 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4512 		    != 0)
   4513 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4514 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4515 		    != 0)
   4516 			oem_reg |= HV_OEM_BITS_LPLU;
   4517 	}
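         	/*
         	 * In short: the PHY_CTRL GbE-disable bits select
         	 * HV_OEM_BITS_A1KDIS and the LPLU bits select HV_OEM_BITS_LPLU;
         	 * outside D0 the non-D0a variants are honored as well.
         	 */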
   4518 
   4519 	/* Set Restart auto-neg to activate the bits */
   4520 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4521 	    && (wm_phy_resetisblocked(sc) == false))
   4522 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4523 
   4524 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4525 
   4526 release:
   4527 	sc->phy.release(sc);
   4528 
   4529 	return rv;
   4530 }
   4531 
   4532 /* Init hardware bits */
   4533 void
   4534 wm_initialize_hardware_bits(struct wm_softc *sc)
   4535 {
   4536 	uint32_t tarc0, tarc1, reg;
   4537 
   4538 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4539 		device_xname(sc->sc_dev), __func__));
   4540 
    4541 	/* For 82571 variants, 80003 and ICHs */
   4542 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4543 	    || (sc->sc_type >= WM_T_80003)) {
   4544 
   4545 		/* Transmit Descriptor Control 0 */
   4546 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4547 		reg |= TXDCTL_COUNT_DESC;
   4548 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4549 
   4550 		/* Transmit Descriptor Control 1 */
   4551 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4552 		reg |= TXDCTL_COUNT_DESC;
   4553 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4554 
   4555 		/* TARC0 */
   4556 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4557 		switch (sc->sc_type) {
   4558 		case WM_T_82571:
   4559 		case WM_T_82572:
   4560 		case WM_T_82573:
   4561 		case WM_T_82574:
   4562 		case WM_T_82583:
   4563 		case WM_T_80003:
   4564 			/* Clear bits 30..27 */
   4565 			tarc0 &= ~__BITS(30, 27);
   4566 			break;
   4567 		default:
   4568 			break;
   4569 		}
   4570 
   4571 		switch (sc->sc_type) {
   4572 		case WM_T_82571:
   4573 		case WM_T_82572:
   4574 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4575 
   4576 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4577 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4578 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4579 			/* 8257[12] Errata No.7 */
    4580 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4581 
   4582 			/* TARC1 bit 28 */
   4583 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4584 				tarc1 &= ~__BIT(28);
   4585 			else
   4586 				tarc1 |= __BIT(28);
   4587 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4588 
   4589 			/*
   4590 			 * 8257[12] Errata No.13
    4591 			 * Disable Dynamic Clock Gating.
   4592 			 */
   4593 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4594 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4595 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4596 			break;
   4597 		case WM_T_82573:
   4598 		case WM_T_82574:
   4599 		case WM_T_82583:
   4600 			if ((sc->sc_type == WM_T_82574)
   4601 			    || (sc->sc_type == WM_T_82583))
   4602 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4603 
   4604 			/* Extended Device Control */
   4605 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4606 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4607 			reg |= __BIT(22);	/* Set bit 22 */
   4608 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4609 
   4610 			/* Device Control */
   4611 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4612 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4613 
   4614 			/* PCIe Control Register */
   4615 			/*
   4616 			 * 82573 Errata (unknown).
   4617 			 *
   4618 			 * 82574 Errata 25 and 82583 Errata 12
   4619 			 * "Dropped Rx Packets":
    4620 			 *   NVM image 2.1.4 and newer does not have this bug.
   4621 			 */
   4622 			reg = CSR_READ(sc, WMREG_GCR);
   4623 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4624 			CSR_WRITE(sc, WMREG_GCR, reg);
   4625 
   4626 			if ((sc->sc_type == WM_T_82574)
   4627 			    || (sc->sc_type == WM_T_82583)) {
   4628 				/*
   4629 				 * Document says this bit must be set for
   4630 				 * proper operation.
   4631 				 */
   4632 				reg = CSR_READ(sc, WMREG_GCR);
   4633 				reg |= __BIT(22);
   4634 				CSR_WRITE(sc, WMREG_GCR, reg);
   4635 
   4636 				/*
    4637 				 * Apply a workaround for a hardware
    4638 				 * erratum documented in the errata sheets:
    4639 				 * unreliable PCIe completions can occur,
    4640 				 * particularly with ASPM enabled, and
    4641 				 * without the fix the issue can cause Tx
    4642 				 * timeouts.
   4643 				 */
   4644 				reg = CSR_READ(sc, WMREG_GCR2);
   4645 				reg |= __BIT(0);
   4646 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4647 			}
   4648 			break;
   4649 		case WM_T_80003:
   4650 			/* TARC0 */
   4651 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4652 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4653 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4654 
   4655 			/* TARC1 bit 28 */
   4656 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4657 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4658 				tarc1 &= ~__BIT(28);
   4659 			else
   4660 				tarc1 |= __BIT(28);
   4661 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4662 			break;
   4663 		case WM_T_ICH8:
   4664 		case WM_T_ICH9:
   4665 		case WM_T_ICH10:
   4666 		case WM_T_PCH:
   4667 		case WM_T_PCH2:
   4668 		case WM_T_PCH_LPT:
   4669 		case WM_T_PCH_SPT:
   4670 		case WM_T_PCH_CNP:
   4671 			/* TARC0 */
   4672 			if (sc->sc_type == WM_T_ICH8) {
   4673 				/* Set TARC0 bits 29 and 28 */
   4674 				tarc0 |= __BITS(29, 28);
   4675 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4676 				tarc0 |= __BIT(29);
   4677 				/*
    4678 				 * Clear bit 28, following Linux.
    4679 				 * See the I218/I219 spec update,
   4680 				 * "5. Buffer Overrun While the I219 is
   4681 				 * Processing DMA Transactions"
   4682 				 */
   4683 				tarc0 &= ~__BIT(28);
   4684 			}
   4685 			/* Set TARC0 bits 23,24,26,27 */
   4686 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4687 
   4688 			/* CTRL_EXT */
   4689 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4690 			reg |= __BIT(22);	/* Set bit 22 */
   4691 			/*
   4692 			 * Enable PHY low-power state when MAC is at D3
   4693 			 * w/o WoL
   4694 			 */
   4695 			if (sc->sc_type >= WM_T_PCH)
   4696 				reg |= CTRL_EXT_PHYPDEN;
   4697 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4698 
   4699 			/* TARC1 */
   4700 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4701 			/* bit 28 */
   4702 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4703 				tarc1 &= ~__BIT(28);
   4704 			else
   4705 				tarc1 |= __BIT(28);
   4706 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4707 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4708 
   4709 			/* Device Status */
   4710 			if (sc->sc_type == WM_T_ICH8) {
   4711 				reg = CSR_READ(sc, WMREG_STATUS);
   4712 				reg &= ~__BIT(31);
   4713 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4714 
   4715 			}
   4716 
   4717 			/* IOSFPC */
   4718 			if (sc->sc_type == WM_T_PCH_SPT) {
   4719 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4720 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4721 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4722 			}
   4723 			/*
    4724 			 * To work around a descriptor data corruption issue
    4725 			 * seen with NFSv2 UDP traffic, simply disable the
    4726 			 * NFS filtering capability.
   4727 			 */
   4728 			reg = CSR_READ(sc, WMREG_RFCTL);
   4729 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4730 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4731 			break;
   4732 		default:
   4733 			break;
   4734 		}
   4735 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4736 
   4737 		switch (sc->sc_type) {
   4738 		/*
   4739 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4740 		 * Avoid RSS Hash Value bug.
   4741 		 */
   4742 		case WM_T_82571:
   4743 		case WM_T_82572:
   4744 		case WM_T_82573:
   4745 		case WM_T_80003:
   4746 		case WM_T_ICH8:
   4747 			reg = CSR_READ(sc, WMREG_RFCTL);
    4748 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4749 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4750 			break;
   4751 		case WM_T_82574:
    4752 			/* Use extended Rx descriptors. */
   4753 			reg = CSR_READ(sc, WMREG_RFCTL);
   4754 			reg |= WMREG_RFCTL_EXSTEN;
   4755 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4756 			break;
   4757 		default:
   4758 			break;
   4759 		}
   4760 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4761 		/*
   4762 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4763 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4764 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4765 		 * Correctly by the Device"
   4766 		 *
   4767 		 * I354(C2000) Errata AVR53:
   4768 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4769 		 * Hang"
   4770 		 */
   4771 		reg = CSR_READ(sc, WMREG_RFCTL);
   4772 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4773 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4774 	}
   4775 }
   4776 
   4777 static uint32_t
   4778 wm_rxpbs_adjust_82580(uint32_t val)
   4779 {
   4780 	uint32_t rv = 0;
   4781 
   4782 	if (val < __arraycount(wm_82580_rxpbs_table))
   4783 		rv = wm_82580_rxpbs_table[val];
   4784 
   4785 	return rv;
   4786 }
   4787 
   4788 /*
   4789  * wm_reset_phy:
   4790  *
   4791  *	generic PHY reset function.
   4792  *	Same as e1000_phy_hw_reset_generic()
   4793  */
   4794 static int
   4795 wm_reset_phy(struct wm_softc *sc)
   4796 {
   4797 	uint32_t reg;
   4798 
   4799 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4800 		device_xname(sc->sc_dev), __func__));
   4801 	if (wm_phy_resetisblocked(sc))
   4802 		return -1;
   4803 
   4804 	sc->phy.acquire(sc);
   4805 
   4806 	reg = CSR_READ(sc, WMREG_CTRL);
   4807 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4808 	CSR_WRITE_FLUSH(sc);
   4809 
   4810 	delay(sc->phy.reset_delay_us);
   4811 
   4812 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4813 	CSR_WRITE_FLUSH(sc);
   4814 
   4815 	delay(150);
   4816 
   4817 	sc->phy.release(sc);
   4818 
   4819 	wm_get_cfg_done(sc);
   4820 	wm_phy_post_reset(sc);
   4821 
   4822 	return 0;
   4823 }
   4824 
   4825 /*
   4826  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4827  * so it is enough to check sc->sc_queue[0] only.
   4828  */
   4829 static void
   4830 wm_flush_desc_rings(struct wm_softc *sc)
   4831 {
   4832 	pcireg_t preg;
   4833 	uint32_t reg;
   4834 	struct wm_txqueue *txq;
   4835 	wiseman_txdesc_t *txd;
   4836 	int nexttx;
   4837 	uint32_t rctl;
   4838 
   4839 	/* First, disable MULR fix in FEXTNVM11 */
   4840 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4841 	reg |= FEXTNVM11_DIS_MULRFIX;
   4842 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4843 
   4844 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4845 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4846 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4847 		return;
   4848 
   4849 	/* TX */
   4850 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4851 	    preg, reg);
   4852 	reg = CSR_READ(sc, WMREG_TCTL);
   4853 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4854 
   4855 	txq = &sc->sc_queue[0].wmq_txq;
   4856 	nexttx = txq->txq_next;
   4857 	txd = &txq->txq_descs[nexttx];
   4858 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4859 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4860 	txd->wtx_fields.wtxu_status = 0;
   4861 	txd->wtx_fields.wtxu_options = 0;
   4862 	txd->wtx_fields.wtxu_vlan = 0;
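         	/*
         	 * The single 512-byte descriptor set up above (with IFCS set)
         	 * gives the hardware one harmless frame to transmit, letting
         	 * the pending descriptor ring flush request complete.
         	 */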
   4863 
   4864 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4865 	    BUS_SPACE_BARRIER_WRITE);
   4866 
   4867 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4868 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4869 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4870 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4871 	delay(250);
   4872 
   4873 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4874 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4875 		return;
   4876 
   4877 	/* RX */
   4878 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4879 	rctl = CSR_READ(sc, WMREG_RCTL);
   4880 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4881 	CSR_WRITE_FLUSH(sc);
   4882 	delay(150);
   4883 
   4884 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4885 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4886 	reg &= 0xffffc000;
   4887 	/*
   4888 	 * Update thresholds: prefetch threshold to 31, host threshold
   4889 	 * to 1 and make sure the granularity is "descriptors" and not
   4890 	 * "cache lines"
   4891 	 */
   4892 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
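         	/* That is, PTHRESH = 0x1f (bits 5:0) and HTHRESH = 1 (bits 13:8). */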
   4893 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4894 
   4895 	/* Momentarily enable the RX ring for the changes to take effect */
   4896 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4897 	CSR_WRITE_FLUSH(sc);
   4898 	delay(150);
   4899 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4900 }
   4901 
   4902 /*
   4903  * wm_reset:
   4904  *
   4905  *	Reset the i82542 chip.
   4906  */
   4907 static void
   4908 wm_reset(struct wm_softc *sc)
   4909 {
   4910 	int phy_reset = 0;
   4911 	int i, error = 0;
   4912 	uint32_t reg;
   4913 	uint16_t kmreg;
   4914 	int rv;
   4915 
   4916 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4917 		device_xname(sc->sc_dev), __func__));
   4918 	KASSERT(sc->sc_type != 0);
   4919 
   4920 	/*
   4921 	 * Allocate on-chip memory according to the MTU size.
   4922 	 * The Packet Buffer Allocation register must be written
   4923 	 * before the chip is reset.
   4924 	 */
   4925 	switch (sc->sc_type) {
   4926 	case WM_T_82547:
   4927 	case WM_T_82547_2:
   4928 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4929 		    PBA_22K : PBA_30K;
   4930 		for (i = 0; i < sc->sc_nqueues; i++) {
   4931 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4932 			txq->txq_fifo_head = 0;
   4933 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4934 			txq->txq_fifo_size =
   4935 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4936 			txq->txq_fifo_stall = 0;
   4937 		}
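         		/*
         		 * E.g. with an MTU <= 8192 sc_pba is PBA_30K: Rx gets
         		 * 30KB of the 40KB packet buffer and the Tx FIFO set up
         		 * above gets the remaining 10KB.
         		 */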
   4938 		break;
   4939 	case WM_T_82571:
   4940 	case WM_T_82572:
   4941 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4942 	case WM_T_80003:
   4943 		sc->sc_pba = PBA_32K;
   4944 		break;
   4945 	case WM_T_82573:
   4946 		sc->sc_pba = PBA_12K;
   4947 		break;
   4948 	case WM_T_82574:
   4949 	case WM_T_82583:
   4950 		sc->sc_pba = PBA_20K;
   4951 		break;
   4952 	case WM_T_82576:
   4953 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4954 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4955 		break;
   4956 	case WM_T_82580:
   4957 	case WM_T_I350:
   4958 	case WM_T_I354:
   4959 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4960 		break;
   4961 	case WM_T_I210:
   4962 	case WM_T_I211:
   4963 		sc->sc_pba = PBA_34K;
   4964 		break;
   4965 	case WM_T_ICH8:
   4966 		/* Workaround for a bit corruption issue in FIFO memory */
   4967 		sc->sc_pba = PBA_8K;
   4968 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4969 		break;
   4970 	case WM_T_ICH9:
   4971 	case WM_T_ICH10:
   4972 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4973 		    PBA_14K : PBA_10K;
   4974 		break;
   4975 	case WM_T_PCH:
   4976 	case WM_T_PCH2:	/* XXX 14K? */
   4977 	case WM_T_PCH_LPT:
   4978 	case WM_T_PCH_SPT:
   4979 	case WM_T_PCH_CNP:
   4980 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   4981 		    PBA_12K : PBA_26K;
   4982 		break;
   4983 	default:
   4984 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4985 		    PBA_40K : PBA_48K;
   4986 		break;
   4987 	}
   4988 	/*
   4989 	 * Only old or non-multiqueue devices have the PBA register
   4990 	 * XXX Need special handling for 82575.
   4991 	 */
   4992 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4993 	    || (sc->sc_type == WM_T_82575))
   4994 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4995 
   4996 	/* Prevent the PCI-E bus from sticking */
   4997 	if (sc->sc_flags & WM_F_PCIE) {
   4998 		int timeout = 800;
   4999 
   5000 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5001 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5002 
   5003 		while (timeout--) {
   5004 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5005 			    == 0)
   5006 				break;
   5007 			delay(100);
   5008 		}
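         		/* The loop above polls up to 800 * 100us = 80ms in total. */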
   5009 		if (timeout == 0)
   5010 			device_printf(sc->sc_dev,
   5011 			    "failed to disable busmastering\n");
   5012 	}
   5013 
   5014 	/* Set the completion timeout for interface */
   5015 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5016 	    || (sc->sc_type == WM_T_82580)
   5017 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5018 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5019 		wm_set_pcie_completion_timeout(sc);
   5020 
   5021 	/* Clear interrupt */
   5022 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5023 	if (wm_is_using_msix(sc)) {
   5024 		if (sc->sc_type != WM_T_82574) {
   5025 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5026 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5027 		} else
   5028 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5029 	}
   5030 
   5031 	/* Stop the transmit and receive processes. */
   5032 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5033 	sc->sc_rctl &= ~RCTL_EN;
   5034 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5035 	CSR_WRITE_FLUSH(sc);
   5036 
   5037 	/* XXX set_tbi_sbp_82543() */
   5038 
   5039 	delay(10*1000);
   5040 
   5041 	/* Must acquire the MDIO ownership before MAC reset */
   5042 	switch (sc->sc_type) {
   5043 	case WM_T_82573:
   5044 	case WM_T_82574:
   5045 	case WM_T_82583:
   5046 		error = wm_get_hw_semaphore_82573(sc);
   5047 		break;
   5048 	default:
   5049 		break;
   5050 	}
   5051 
   5052 	/*
   5053 	 * 82541 Errata 29? & 82547 Errata 28?
   5054 	 * See also the description about PHY_RST bit in CTRL register
   5055 	 * in 8254x_GBe_SDM.pdf.
   5056 	 */
   5057 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5058 		CSR_WRITE(sc, WMREG_CTRL,
   5059 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5060 		CSR_WRITE_FLUSH(sc);
   5061 		delay(5000);
   5062 	}
   5063 
   5064 	switch (sc->sc_type) {
   5065 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5066 	case WM_T_82541:
   5067 	case WM_T_82541_2:
   5068 	case WM_T_82547:
   5069 	case WM_T_82547_2:
   5070 		/*
   5071 		 * On some chipsets, a reset through a memory-mapped write
   5072 		 * cycle can cause the chip to reset before completing the
   5073 		 * write cycle. This causes major headache that can be avoided
   5074 		 * by issuing the reset via indirect register writes through
   5075 		 * I/O space.
   5076 		 *
   5077 		 * So, if we successfully mapped the I/O BAR at attach time,
   5078 		 * use that. Otherwise, try our luck with a memory-mapped
   5079 		 * reset.
   5080 		 */
   5081 		if (sc->sc_flags & WM_F_IOH_VALID)
   5082 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5083 		else
   5084 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5085 		break;
   5086 	case WM_T_82545_3:
   5087 	case WM_T_82546_3:
   5088 		/* Use the shadow control register on these chips. */
   5089 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5090 		break;
   5091 	case WM_T_80003:
   5092 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5093 		sc->phy.acquire(sc);
   5094 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5095 		sc->phy.release(sc);
   5096 		break;
   5097 	case WM_T_ICH8:
   5098 	case WM_T_ICH9:
   5099 	case WM_T_ICH10:
   5100 	case WM_T_PCH:
   5101 	case WM_T_PCH2:
   5102 	case WM_T_PCH_LPT:
   5103 	case WM_T_PCH_SPT:
   5104 	case WM_T_PCH_CNP:
   5105 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5106 		if (wm_phy_resetisblocked(sc) == false) {
   5107 			/*
   5108 			 * Gate automatic PHY configuration by hardware on
   5109 			 * non-managed 82579
   5110 			 */
   5111 			if ((sc->sc_type == WM_T_PCH2)
   5112 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5113 				== 0))
   5114 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5115 
   5116 			reg |= CTRL_PHY_RESET;
   5117 			phy_reset = 1;
   5118 		} else
   5119 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5120 		sc->phy.acquire(sc);
   5121 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5122 		/* Don't insert a completion barrier during reset */
   5123 		delay(20*1000);
   5124 		mutex_exit(sc->sc_ich_phymtx);
   5125 		break;
   5126 	case WM_T_82580:
   5127 	case WM_T_I350:
   5128 	case WM_T_I354:
   5129 	case WM_T_I210:
   5130 	case WM_T_I211:
   5131 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5132 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5133 			CSR_WRITE_FLUSH(sc);
   5134 		delay(5000);
   5135 		break;
   5136 	case WM_T_82542_2_0:
   5137 	case WM_T_82542_2_1:
   5138 	case WM_T_82543:
   5139 	case WM_T_82540:
   5140 	case WM_T_82545:
   5141 	case WM_T_82546:
   5142 	case WM_T_82571:
   5143 	case WM_T_82572:
   5144 	case WM_T_82573:
   5145 	case WM_T_82574:
   5146 	case WM_T_82575:
   5147 	case WM_T_82576:
   5148 	case WM_T_82583:
   5149 	default:
   5150 		/* Everything else can safely use the documented method. */
   5151 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5152 		break;
   5153 	}
   5154 
   5155 	/* Must release the MDIO ownership after MAC reset */
   5156 	switch (sc->sc_type) {
   5157 	case WM_T_82573:
   5158 	case WM_T_82574:
   5159 	case WM_T_82583:
   5160 		if (error == 0)
   5161 			wm_put_hw_semaphore_82573(sc);
   5162 		break;
   5163 	default:
   5164 		break;
   5165 	}
   5166 
   5167 	/* Set Phy Config Counter to 50msec */
   5168 	if (sc->sc_type == WM_T_PCH2) {
   5169 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5170 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5171 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5172 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5173 	}
   5174 
   5175 	if (phy_reset != 0)
   5176 		wm_get_cfg_done(sc);
   5177 
   5178 	/* Reload EEPROM */
   5179 	switch (sc->sc_type) {
   5180 	case WM_T_82542_2_0:
   5181 	case WM_T_82542_2_1:
   5182 	case WM_T_82543:
   5183 	case WM_T_82544:
   5184 		delay(10);
   5185 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5186 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5187 		CSR_WRITE_FLUSH(sc);
   5188 		delay(2000);
   5189 		break;
   5190 	case WM_T_82540:
   5191 	case WM_T_82545:
   5192 	case WM_T_82545_3:
   5193 	case WM_T_82546:
   5194 	case WM_T_82546_3:
   5195 		delay(5*1000);
   5196 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5197 		break;
   5198 	case WM_T_82541:
   5199 	case WM_T_82541_2:
   5200 	case WM_T_82547:
   5201 	case WM_T_82547_2:
   5202 		delay(20000);
   5203 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5204 		break;
   5205 	case WM_T_82571:
   5206 	case WM_T_82572:
   5207 	case WM_T_82573:
   5208 	case WM_T_82574:
   5209 	case WM_T_82583:
   5210 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5211 			delay(10);
   5212 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5213 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5214 			CSR_WRITE_FLUSH(sc);
   5215 		}
   5216 		/* check EECD_EE_AUTORD */
   5217 		wm_get_auto_rd_done(sc);
   5218 		/*
   5219 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5220 		 * is set.
   5221 		 */
   5222 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5223 		    || (sc->sc_type == WM_T_82583))
   5224 			delay(25*1000);
   5225 		break;
   5226 	case WM_T_82575:
   5227 	case WM_T_82576:
   5228 	case WM_T_82580:
   5229 	case WM_T_I350:
   5230 	case WM_T_I354:
   5231 	case WM_T_I210:
   5232 	case WM_T_I211:
   5233 	case WM_T_80003:
   5234 		/* check EECD_EE_AUTORD */
   5235 		wm_get_auto_rd_done(sc);
   5236 		break;
   5237 	case WM_T_ICH8:
   5238 	case WM_T_ICH9:
   5239 	case WM_T_ICH10:
   5240 	case WM_T_PCH:
   5241 	case WM_T_PCH2:
   5242 	case WM_T_PCH_LPT:
   5243 	case WM_T_PCH_SPT:
   5244 	case WM_T_PCH_CNP:
   5245 		break;
   5246 	default:
   5247 		panic("%s: unknown type\n", __func__);
   5248 	}
   5249 
   5250 	/* Check whether EEPROM is present or not */
   5251 	switch (sc->sc_type) {
   5252 	case WM_T_82575:
   5253 	case WM_T_82576:
   5254 	case WM_T_82580:
   5255 	case WM_T_I350:
   5256 	case WM_T_I354:
   5257 	case WM_T_ICH8:
   5258 	case WM_T_ICH9:
   5259 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5260 			/* Not found */
   5261 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5262 			if (sc->sc_type == WM_T_82575)
   5263 				wm_reset_init_script_82575(sc);
   5264 		}
   5265 		break;
   5266 	default:
   5267 		break;
   5268 	}
   5269 
   5270 	if (phy_reset != 0)
   5271 		wm_phy_post_reset(sc);
   5272 
   5273 	if ((sc->sc_type == WM_T_82580)
   5274 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5275 		/* Clear global device reset status bit */
   5276 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5277 	}
   5278 
   5279 	/* Clear any pending interrupt events. */
   5280 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5281 	reg = CSR_READ(sc, WMREG_ICR);
   5282 	if (wm_is_using_msix(sc)) {
   5283 		if (sc->sc_type != WM_T_82574) {
   5284 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5285 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5286 		} else
   5287 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5288 	}
   5289 
   5290 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5291 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5292 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5293 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5294 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5295 		reg |= KABGTXD_BGSQLBIAS;
   5296 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5297 	}
   5298 
   5299 	/* Reload sc_ctrl */
   5300 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5301 
   5302 	wm_set_eee(sc);
   5303 
   5304 	/*
   5305 	 * For PCH, this write will make sure that any noise will be detected
   5306 	 * as a CRC error and be dropped rather than show up as a bad packet
   5307 	 * to the DMA engine
   5308 	 */
   5309 	if (sc->sc_type == WM_T_PCH)
   5310 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5311 
   5312 	if (sc->sc_type >= WM_T_82544)
   5313 		CSR_WRITE(sc, WMREG_WUC, 0);
   5314 
   5315 	if (sc->sc_type < WM_T_82575)
   5316 		wm_disable_aspm(sc); /* Workaround for some chips */
   5317 
   5318 	wm_reset_mdicnfg_82580(sc);
   5319 
   5320 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5321 		wm_pll_workaround_i210(sc);
   5322 
   5323 	if (sc->sc_type == WM_T_80003) {
   5324 		/* Default to TRUE to enable the MDIC W/A */
   5325 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5326 
   5327 		rv = wm_kmrn_readreg(sc,
   5328 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5329 		if (rv == 0) {
   5330 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5331 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5332 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5333 			else
   5334 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5335 		}
   5336 	}
   5337 }
   5338 
   5339 /*
   5340  * wm_add_rxbuf:
   5341  *
    5342  *	Add a receive buffer to the indicated descriptor.
   5343  */
   5344 static int
   5345 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5346 {
   5347 	struct wm_softc *sc = rxq->rxq_sc;
   5348 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5349 	struct mbuf *m;
   5350 	int error;
   5351 
   5352 	KASSERT(mutex_owned(rxq->rxq_lock));
   5353 
   5354 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5355 	if (m == NULL)
   5356 		return ENOBUFS;
   5357 
   5358 	MCLGET(m, M_DONTWAIT);
   5359 	if ((m->m_flags & M_EXT) == 0) {
   5360 		m_freem(m);
   5361 		return ENOBUFS;
   5362 	}
   5363 
   5364 	if (rxs->rxs_mbuf != NULL)
   5365 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5366 
   5367 	rxs->rxs_mbuf = m;
   5368 
   5369 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5370 	/*
   5371 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5372 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5373 	 */
   5374 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5375 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5376 	if (error) {
   5377 		/* XXX XXX XXX */
   5378 		aprint_error_dev(sc->sc_dev,
   5379 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5380 		panic("wm_add_rxbuf");
   5381 	}
   5382 
   5383 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5384 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5385 
   5386 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5387 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5388 			wm_init_rxdesc(rxq, idx);
   5389 	} else
   5390 		wm_init_rxdesc(rxq, idx);
   5391 
   5392 	return 0;
   5393 }
   5394 
   5395 /*
   5396  * wm_rxdrain:
   5397  *
   5398  *	Drain the receive queue.
   5399  */
   5400 static void
   5401 wm_rxdrain(struct wm_rxqueue *rxq)
   5402 {
   5403 	struct wm_softc *sc = rxq->rxq_sc;
   5404 	struct wm_rxsoft *rxs;
   5405 	int i;
   5406 
   5407 	KASSERT(mutex_owned(rxq->rxq_lock));
   5408 
   5409 	for (i = 0; i < WM_NRXDESC; i++) {
   5410 		rxs = &rxq->rxq_soft[i];
   5411 		if (rxs->rxs_mbuf != NULL) {
   5412 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5413 			m_freem(rxs->rxs_mbuf);
   5414 			rxs->rxs_mbuf = NULL;
   5415 		}
   5416 	}
   5417 }
   5418 
   5419 /*
   5420  * Setup registers for RSS.
   5421  *
   5422  * XXX not yet VMDq support
   5423  */
   5424 static void
   5425 wm_init_rss(struct wm_softc *sc)
   5426 {
   5427 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5428 	int i;
   5429 
   5430 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5431 
   5432 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5433 		unsigned int qid, reta_ent;
   5434 
   5435 		qid  = i % sc->sc_nqueues;
   5436 		switch (sc->sc_type) {
   5437 		case WM_T_82574:
   5438 			reta_ent = __SHIFTIN(qid,
   5439 			    RETA_ENT_QINDEX_MASK_82574);
   5440 			break;
   5441 		case WM_T_82575:
   5442 			reta_ent = __SHIFTIN(qid,
   5443 			    RETA_ENT_QINDEX1_MASK_82575);
   5444 			break;
   5445 		default:
   5446 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5447 			break;
   5448 		}
   5449 
   5450 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5451 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5452 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5453 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5454 	}
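         	/*
         	 * E.g. with sc_nqueues == 4 the redirection table entries
         	 * cycle through queue IDs 0, 1, 2, 3, 0, 1, ... so Rx flows
         	 * are spread evenly across the queues.
         	 */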
   5455 
   5456 	rss_getkey((uint8_t *)rss_key);
   5457 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5458 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5459 
   5460 	if (sc->sc_type == WM_T_82574)
   5461 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5462 	else
   5463 		mrqc = MRQC_ENABLE_RSS_MQ;
   5464 
   5465 	/*
   5466 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5467 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5468 	 */
   5469 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5470 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5471 #if 0
   5472 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5473 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5474 #endif
   5475 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5476 
   5477 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5478 }
   5479 
   5480 /*
    5481  * Adjust the TX and RX queue numbers which the system actually uses.
    5482  *
    5483  * The numbers are affected by the following parameters:
    5484  *     - The number of hardware queues
   5485  *     - The number of MSI-X vectors (= "nvectors" argument)
   5486  *     - ncpu
   5487  */
   5488 static void
   5489 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5490 {
   5491 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5492 
   5493 	if (nvectors < 2) {
   5494 		sc->sc_nqueues = 1;
   5495 		return;
   5496 	}
   5497 
   5498 	switch (sc->sc_type) {
   5499 	case WM_T_82572:
   5500 		hw_ntxqueues = 2;
   5501 		hw_nrxqueues = 2;
   5502 		break;
   5503 	case WM_T_82574:
   5504 		hw_ntxqueues = 2;
   5505 		hw_nrxqueues = 2;
   5506 		break;
   5507 	case WM_T_82575:
   5508 		hw_ntxqueues = 4;
   5509 		hw_nrxqueues = 4;
   5510 		break;
   5511 	case WM_T_82576:
   5512 		hw_ntxqueues = 16;
   5513 		hw_nrxqueues = 16;
   5514 		break;
   5515 	case WM_T_82580:
   5516 	case WM_T_I350:
   5517 	case WM_T_I354:
   5518 		hw_ntxqueues = 8;
   5519 		hw_nrxqueues = 8;
   5520 		break;
   5521 	case WM_T_I210:
   5522 		hw_ntxqueues = 4;
   5523 		hw_nrxqueues = 4;
   5524 		break;
   5525 	case WM_T_I211:
   5526 		hw_ntxqueues = 2;
   5527 		hw_nrxqueues = 2;
   5528 		break;
   5529 		/*
    5530 		 * As the following ethernet controllers do not support
    5531 		 * MSI-X, this driver does not use multiqueue on them:
   5532 		 *     - WM_T_80003
   5533 		 *     - WM_T_ICH8
   5534 		 *     - WM_T_ICH9
   5535 		 *     - WM_T_ICH10
   5536 		 *     - WM_T_PCH
   5537 		 *     - WM_T_PCH2
   5538 		 *     - WM_T_PCH_LPT
   5539 		 */
   5540 	default:
   5541 		hw_ntxqueues = 1;
   5542 		hw_nrxqueues = 1;
   5543 		break;
   5544 	}
   5545 
   5546 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5547 
   5548 	/*
    5549 	 * As using more queues than MSI-X vectors cannot improve scaling,
    5550 	 * we limit the number of queues actually used.
   5551 	 */
   5552 	if (nvectors < hw_nqueues + 1)
   5553 		sc->sc_nqueues = nvectors - 1;
   5554 	else
   5555 		sc->sc_nqueues = hw_nqueues;
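         	/*
         	 * E.g. an 82576 (16 hardware queues) attached with
         	 * nvectors == 5 ends up with sc_nqueues == 4 here, one vector
         	 * being reserved for the link interrupt.
         	 */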
   5556 
   5557 	/*
    5558 	 * As using more queues than CPUs cannot improve scaling, we limit
    5559 	 * the number of queues actually used.
   5560 	 */
   5561 	if (ncpu < sc->sc_nqueues)
   5562 		sc->sc_nqueues = ncpu;
   5563 }
   5564 
   5565 static inline bool
   5566 wm_is_using_msix(struct wm_softc *sc)
   5567 {
   5568 
   5569 	return (sc->sc_nintrs > 1);
   5570 }
   5571 
   5572 static inline bool
   5573 wm_is_using_multiqueue(struct wm_softc *sc)
   5574 {
   5575 
   5576 	return (sc->sc_nqueues > 1);
   5577 }
   5578 
   5579 static int
   5580 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5581 {
   5582 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5583 
   5584 	wmq->wmq_id = qidx;
   5585 	wmq->wmq_intr_idx = intr_idx;
   5586 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5587 	    wm_handle_queue, wmq);
   5588 	if (wmq->wmq_si != NULL)
   5589 		return 0;
   5590 
   5591 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5592 	    wmq->wmq_id);
   5593 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5594 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5595 	return ENOMEM;
   5596 }
   5597 
   5598 /*
    5599  * Both single-interrupt MSI and INTx can use this function.
   5600  */
   5601 static int
   5602 wm_setup_legacy(struct wm_softc *sc)
   5603 {
   5604 	pci_chipset_tag_t pc = sc->sc_pc;
   5605 	const char *intrstr = NULL;
   5606 	char intrbuf[PCI_INTRSTR_LEN];
   5607 	int error;
   5608 
   5609 	error = wm_alloc_txrx_queues(sc);
   5610 	if (error) {
   5611 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5612 		    error);
   5613 		return ENOMEM;
   5614 	}
   5615 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5616 	    sizeof(intrbuf));
   5617 #ifdef WM_MPSAFE
   5618 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5619 #endif
   5620 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5621 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5622 	if (sc->sc_ihs[0] == NULL) {
    5623 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   5624 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5625 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5626 		return ENOMEM;
   5627 	}
   5628 
   5629 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5630 	sc->sc_nintrs = 1;
   5631 
   5632 	return wm_softint_establish_queue(sc, 0, 0);
   5633 }
   5634 
   5635 static int
   5636 wm_setup_msix(struct wm_softc *sc)
   5637 {
   5638 	void *vih;
   5639 	kcpuset_t *affinity;
   5640 	int qidx, error, intr_idx, txrx_established;
   5641 	pci_chipset_tag_t pc = sc->sc_pc;
   5642 	const char *intrstr = NULL;
   5643 	char intrbuf[PCI_INTRSTR_LEN];
   5644 	char intr_xname[INTRDEVNAMEBUF];
   5645 
   5646 	if (sc->sc_nqueues < ncpu) {
   5647 		/*
   5648 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5649 		 * interrupts starts from CPU#1.
   5650 		 */
   5651 		sc->sc_affinity_offset = 1;
   5652 	} else {
   5653 		/*
    5654 		 * In this case, this device uses all CPUs. So, we unify the
    5655 		 * affinity cpu_index to the MSI-X vector number for readability.
   5656 		 */
   5657 		sc->sc_affinity_offset = 0;
   5658 	}
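         	/*
         	 * E.g. with 4 queues on an 8-CPU system, sc_affinity_offset is
         	 * 1, so the Tx/Rx vectors below bind to CPU 1..4 and CPU 0 is
         	 * left for other devices.
         	 */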
   5659 
   5660 	error = wm_alloc_txrx_queues(sc);
   5661 	if (error) {
   5662 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5663 		    error);
   5664 		return ENOMEM;
   5665 	}
   5666 
   5667 	kcpuset_create(&affinity, false);
   5668 	intr_idx = 0;
   5669 
   5670 	/*
   5671 	 * TX and RX
   5672 	 */
   5673 	txrx_established = 0;
   5674 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5675 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5676 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5677 
   5678 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5679 		    sizeof(intrbuf));
   5680 #ifdef WM_MPSAFE
   5681 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5682 		    PCI_INTR_MPSAFE, true);
   5683 #endif
   5684 		memset(intr_xname, 0, sizeof(intr_xname));
   5685 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5686 		    device_xname(sc->sc_dev), qidx);
   5687 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5688 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5689 		if (vih == NULL) {
   5690 			aprint_error_dev(sc->sc_dev,
   5691 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5692 			    intrstr ? " at " : "",
   5693 			    intrstr ? intrstr : "");
   5694 
   5695 			goto fail;
   5696 		}
   5697 		kcpuset_zero(affinity);
   5698 		/* Round-robin affinity */
   5699 		kcpuset_set(affinity, affinity_to);
   5700 		error = interrupt_distribute(vih, affinity, NULL);
   5701 		if (error == 0) {
   5702 			aprint_normal_dev(sc->sc_dev,
   5703 			    "for TX and RX interrupting at %s affinity to %u\n",
   5704 			    intrstr, affinity_to);
   5705 		} else {
   5706 			aprint_normal_dev(sc->sc_dev,
   5707 			    "for TX and RX interrupting at %s\n", intrstr);
   5708 		}
   5709 		sc->sc_ihs[intr_idx] = vih;
   5710 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5711 			goto fail;
   5712 		txrx_established++;
   5713 		intr_idx++;
   5714 	}
   5715 
   5716 	/* LINK */
   5717 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5718 	    sizeof(intrbuf));
   5719 #ifdef WM_MPSAFE
   5720 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5721 #endif
   5722 	memset(intr_xname, 0, sizeof(intr_xname));
   5723 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5724 	    device_xname(sc->sc_dev));
   5725 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5726 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5727 	if (vih == NULL) {
   5728 		aprint_error_dev(sc->sc_dev,
   5729 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5730 		    intrstr ? " at " : "",
   5731 		    intrstr ? intrstr : "");
   5732 
   5733 		goto fail;
   5734 	}
   5735 	/* Keep default affinity to LINK interrupt */
   5736 	aprint_normal_dev(sc->sc_dev,
   5737 	    "for LINK interrupting at %s\n", intrstr);
   5738 	sc->sc_ihs[intr_idx] = vih;
   5739 	sc->sc_link_intr_idx = intr_idx;
   5740 
   5741 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5742 	kcpuset_destroy(affinity);
   5743 	return 0;
   5744 
   5745  fail:
   5746 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5747 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5748 		pci_intr_disestablish(sc->sc_pc,
         		    sc->sc_ihs[wmq->wmq_intr_idx]);
   5749 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5750 	}
   5751 
   5752 	kcpuset_destroy(affinity);
   5753 	return ENOMEM;
   5754 }
   5755 
   5756 static void
   5757 wm_unset_stopping_flags(struct wm_softc *sc)
   5758 {
   5759 	int i;
   5760 
   5761 	KASSERT(WM_CORE_LOCKED(sc));
   5762 
   5763 	/* Must unset stopping flags in ascending order. */
   5764 	for (i = 0; i < sc->sc_nqueues; i++) {
   5765 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5766 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5767 
   5768 		mutex_enter(txq->txq_lock);
   5769 		txq->txq_stopping = false;
   5770 		mutex_exit(txq->txq_lock);
   5771 
   5772 		mutex_enter(rxq->rxq_lock);
   5773 		rxq->rxq_stopping = false;
   5774 		mutex_exit(rxq->rxq_lock);
   5775 	}
   5776 
   5777 	sc->sc_core_stopping = false;
   5778 }
   5779 
   5780 static void
   5781 wm_set_stopping_flags(struct wm_softc *sc)
   5782 {
   5783 	int i;
   5784 
   5785 	KASSERT(WM_CORE_LOCKED(sc));
   5786 
   5787 	sc->sc_core_stopping = true;
   5788 
   5789 	/* Must set stopping flags in ascending order. */
   5790 	for (i = 0; i < sc->sc_nqueues; i++) {
   5791 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5792 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5793 
   5794 		mutex_enter(rxq->rxq_lock);
   5795 		rxq->rxq_stopping = true;
   5796 		mutex_exit(rxq->rxq_lock);
   5797 
   5798 		mutex_enter(txq->txq_lock);
   5799 		txq->txq_stopping = true;
   5800 		mutex_exit(txq->txq_lock);
   5801 	}
   5802 }
   5803 
   5804 /*
   5805  * Write interrupt interval value to ITR or EITR
   5806  */
   5807 static void
   5808 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5809 {
   5810 
   5811 	if (!wmq->wmq_set_itr)
   5812 		return;
   5813 
   5814 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5815 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5816 
   5817 		/*
   5818 		 * 82575 doesn't have CNT_INGR field.
    5819 		 * So, overwrite the counter field in software.
   5820 		 */
   5821 		if (sc->sc_type == WM_T_82575)
   5822 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5823 		else
   5824 			eitr |= EITR_CNT_INGR;
   5825 
   5826 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5827 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5828 		/*
    5829 		 * 82574 has both ITR and EITR. Set EITR when we use
    5830 		 * the multiqueue function with MSI-X.
   5831 		 */
   5832 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5833 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5834 	} else {
   5835 		KASSERT(wmq->wmq_id == 0);
   5836 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5837 	}
   5838 
   5839 	wmq->wmq_set_itr = false;
   5840 }
   5841 
   5842 /*
   5843  * TODO
    5844  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5845  * but it does not fit wm(4) well, so we keep AIM disabled until we
    5846  * find an appropriate ITR calculation for wm(4).
   5847  */
   5848 /*
    5849  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5850  * write to the register. This function does not write the ITR/EITR register.
   5851  */
   5852 static void
   5853 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5854 {
   5855 #ifdef NOTYET
   5856 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5857 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5858 	uint32_t avg_size = 0;
   5859 	uint32_t new_itr;
   5860 
   5861 	if (rxq->rxq_packets)
    5862 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   5863 	if (txq->txq_packets)
   5864 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5865 
   5866 	if (avg_size == 0) {
   5867 		new_itr = 450; /* restore default value */
   5868 		goto out;
   5869 	}
   5870 
   5871 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5872 	avg_size += 24;
   5873 
   5874 	/* Don't starve jumbo frames */
   5875 	avg_size = uimin(avg_size, 3000);
   5876 
   5877 	/* Give a little boost to mid-size frames */
   5878 	if ((avg_size > 300) && (avg_size < 1200))
   5879 		new_itr = avg_size / 3;
   5880 	else
   5881 		new_itr = avg_size / 2;
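         	/*
         	 * E.g. an average frame of 600 bytes gets the mid-size boost
         	 * (new_itr = 600 / 3 = 200), while a 1524-byte average yields
         	 * new_itr = 1524 / 2 = 762.
         	 */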
   5882 
   5883 out:
   5884 	/*
    5885 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5886 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5887 	 */
   5888 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5889 		new_itr *= 4;
   5890 
   5891 	if (new_itr != wmq->wmq_itr) {
   5892 		wmq->wmq_itr = new_itr;
   5893 		wmq->wmq_set_itr = true;
   5894 	} else
   5895 		wmq->wmq_set_itr = false;
   5896 
   5897 	rxq->rxq_packets = 0;
   5898 	rxq->rxq_bytes = 0;
   5899 	txq->txq_packets = 0;
   5900 	txq->txq_bytes = 0;
   5901 #endif
   5902 }
   5903 
   5904 static void
   5905 wm_init_sysctls(struct wm_softc *sc)
   5906 {
   5907 	struct sysctllog **log;
   5908 	const struct sysctlnode *rnode, *qnode, *cnode;
   5909 	int i, rv;
   5910 	const char *dvname;
   5911 
   5912 	log = &sc->sc_sysctllog;
   5913 	dvname = device_xname(sc->sc_dev);
   5914 
   5915 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5916 	    0, CTLTYPE_NODE, dvname,
   5917 	    SYSCTL_DESCR("wm information and settings"),
   5918 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5919 	if (rv != 0)
   5920 		goto err;
   5921 
   5922 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
    5923 	    CTLTYPE_BOOL, "txrx_workqueue",
         	    SYSCTL_DESCR("Use workqueue for packet processing"),
   5924 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5925 	if (rv != 0)
   5926 		goto teardown;
   5927 
   5928 	for (i = 0; i < sc->sc_nqueues; i++) {
   5929 		struct wm_queue *wmq = &sc->sc_queue[i];
   5930 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5931 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5932 
   5933 		snprintf(sc->sc_queue[i].sysctlname,
   5934 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5935 
   5936 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5937 		    0, CTLTYPE_NODE,
   5938 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5939 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5940 			break;
   5941 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5942 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5943 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   5944 		    NULL, 0, &txq->txq_free,
   5945 		    0, CTL_CREATE, CTL_EOL) != 0)
   5946 			break;
   5947 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5948 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5949 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   5950 		    NULL, 0, &txq->txq_next,
   5951 		    0, CTL_CREATE, CTL_EOL) != 0)
   5952 			break;
   5953 
   5954 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5955 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5956 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   5957 		    NULL, 0, &rxq->rxq_ptr,
   5958 		    0, CTL_CREATE, CTL_EOL) != 0)
   5959 			break;
   5960 	}
   5961 
   5962 #ifdef WM_DEBUG
   5963 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5964 	    CTLTYPE_INT, "debug_flags",
   5965 	    SYSCTL_DESCR(
   5966 		    "Debug flags:\n"	\
   5967 		    "\t0x01 LINK\n"	\
   5968 		    "\t0x02 TX\n"	\
   5969 		    "\t0x04 RX\n"	\
   5970 		    "\t0x08 GMII\n"	\
   5971 		    "\t0x10 MANAGE\n"	\
   5972 		    "\t0x20 NVM\n"	\
   5973 		    "\t0x40 INIT\n"	\
   5974 		    "\t0x80 LOCK"),
   5975 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   5976 	if (rv != 0)
   5977 		goto teardown;
   5978 #endif
   5979 
   5980 	return;
   5981 
   5982 teardown:
   5983 	sysctl_teardown(log);
   5984 err:
   5985 	sc->sc_sysctllog = NULL;
   5986 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5987 	    __func__, rv);
   5988 }
   5989 
   5990 /*
   5991  * wm_init:		[ifnet interface function]
   5992  *
   5993  *	Initialize the interface.
   5994  */
   5995 static int
   5996 wm_init(struct ifnet *ifp)
   5997 {
   5998 	struct wm_softc *sc = ifp->if_softc;
   5999 	int ret;
   6000 
   6001 	WM_CORE_LOCK(sc);
   6002 	ret = wm_init_locked(ifp);
   6003 	WM_CORE_UNLOCK(sc);
   6004 
   6005 	return ret;
   6006 }
   6007 
   6008 static int
   6009 wm_init_locked(struct ifnet *ifp)
   6010 {
   6011 	struct wm_softc *sc = ifp->if_softc;
   6012 	struct ethercom *ec = &sc->sc_ethercom;
   6013 	int i, j, trynum, error = 0;
   6014 	uint32_t reg, sfp_mask = 0;
   6015 
   6016 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6017 		device_xname(sc->sc_dev), __func__));
   6018 	KASSERT(WM_CORE_LOCKED(sc));
   6019 
   6020 	/*
    6021 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6022 	 * There is a small but measurable benefit to avoiding the adjustment
   6023 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6024 	 * on such platforms.  One possibility is that the DMA itself is
   6025 	 * slightly more efficient if the front of the entire packet (instead
   6026 	 * of the front of the headers) is aligned.
   6027 	 *
   6028 	 * Note we must always set align_tweak to 0 if we are using
   6029 	 * jumbo frames.
   6030 	 */
   6031 #ifdef __NO_STRICT_ALIGNMENT
   6032 	sc->sc_align_tweak = 0;
   6033 #else
   6034 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6035 		sc->sc_align_tweak = 0;
   6036 	else
   6037 		sc->sc_align_tweak = 2;
   6038 #endif /* __NO_STRICT_ALIGNMENT */
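         	/*
         	 * E.g. with the default MTU of 1500 and 2KB mbuf clusters,
         	 * 1500 + ETHER_HDR_LEN + ETHER_CRC_LEN = 1518 fits in
         	 * MCLBYTES - 2, so sc_align_tweak is 2 and the IP header
         	 * following the 14-byte Ethernet header ends up 4-byte aligned.
         	 */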
   6039 
   6040 	/* Cancel any pending I/O. */
   6041 	wm_stop_locked(ifp, false, false);
   6042 
   6043 	/* Update statistics before reset */
   6044 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6045 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6046 
   6047 	/* PCH_SPT hardware workaround */
   6048 	if (sc->sc_type == WM_T_PCH_SPT)
   6049 		wm_flush_desc_rings(sc);
   6050 
   6051 	/* Reset the chip to a known state. */
   6052 	wm_reset(sc);
   6053 
   6054 	/*
    6055 	 * AMT-based hardware can now take control from firmware.
   6056 	 * Do this after reset.
   6057 	 */
   6058 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6059 		wm_get_hw_control(sc);
   6060 
   6061 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6062 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6063 		wm_legacy_irq_quirk_spt(sc);
   6064 
   6065 	/* Init hardware bits */
   6066 	wm_initialize_hardware_bits(sc);
   6067 
   6068 	/* Reset the PHY. */
   6069 	if (sc->sc_flags & WM_F_HAS_MII)
   6070 		wm_gmii_reset(sc);
   6071 
   6072 	if (sc->sc_type >= WM_T_ICH8) {
   6073 		reg = CSR_READ(sc, WMREG_GCR);
   6074 		/*
   6075 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6076 		 * default after reset.
   6077 		 */
   6078 		if (sc->sc_type == WM_T_ICH8)
   6079 			reg |= GCR_NO_SNOOP_ALL;
   6080 		else
   6081 			reg &= ~GCR_NO_SNOOP_ALL;
   6082 		CSR_WRITE(sc, WMREG_GCR, reg);
   6083 	}
   6084 
   6085 	if ((sc->sc_type >= WM_T_ICH8)
   6086 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6087 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6088 
   6089 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6090 		reg |= CTRL_EXT_RO_DIS;
   6091 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6092 	}
   6093 
   6094 	/* Calculate (E)ITR value */
   6095 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6096 		/*
   6097 		 * For NEWQUEUE's EITR (except for 82575).
   6098 		 * 82575's EITR should be set same throttling value as other
   6099 		 * old controllers' ITR because the interrupt/sec calculation
   6100 		 * is the same, that is, 1,000,000,000 / (N * 256).
   6101 		 *
   6102 		 * 82574's EITR should be set same throttling value as ITR.
   6103 		 *
   6104 		 * For N interrupts/sec, set this value to:
    6105 		 * 1,000,000 / N in contrast to the ITR throttling value.
   6106 		 */
   6107 		sc->sc_itr_init = 450;
   6108 	} else if (sc->sc_type >= WM_T_82543) {
   6109 		/*
   6110 		 * Set up the interrupt throttling register (units of 256ns)
   6111 		 * Note that a footnote in Intel's documentation says this
   6112 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    6113 		 * or 10Mbit mode.  Empirically, that also appears to be
    6114 		 * true for the 1024ns units of the other
   6115 		 * interrupt-related timer registers -- so, really, we ought
   6116 		 * to divide this value by 4 when the link speed is low.
   6117 		 *
   6118 		 * XXX implement this division at link speed change!
   6119 		 */
   6120 
   6121 		/*
   6122 		 * For N interrupts/sec, set this value to:
   6123 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6124 		 * absolute and packet timer values to this value
   6125 		 * divided by 4 to get "simple timer" behavior.
   6126 		 */
   6127 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6128 	}
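         	/*
         	 * Sanity check of the arithmetic above: the legacy value 1500
         	 * gives 1,000,000,000 / (1500 * 256) =~ 2604 ints/sec, and the
         	 * NEWQUEUE value 450 gives 1,000,000 / 450 =~ 2222 ints/sec.
         	 */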
   6129 
   6130 	error = wm_init_txrx_queues(sc);
   6131 	if (error)
   6132 		goto out;
   6133 
   6134 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6135 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6136 	    (sc->sc_type >= WM_T_82575))
   6137 		wm_serdes_power_up_link_82575(sc);
   6138 
   6139 	/* Clear out the VLAN table -- we don't use it (yet). */
   6140 	CSR_WRITE(sc, WMREG_VET, 0);
   6141 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6142 		trynum = 10; /* Due to hw errata */
   6143 	else
   6144 		trynum = 1;
   6145 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6146 		for (j = 0; j < trynum; j++)
   6147 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6148 
   6149 	/*
   6150 	 * Set up flow-control parameters.
   6151 	 *
   6152 	 * XXX Values could probably stand some tuning.
   6153 	 */
   6154 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6155 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6156 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6157 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6158 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6159 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6160 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6161 	}
   6162 
   6163 	sc->sc_fcrtl = FCRTL_DFLT;
   6164 	if (sc->sc_type < WM_T_82543) {
   6165 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6166 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6167 	} else {
   6168 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6169 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6170 	}
   6171 
   6172 	if (sc->sc_type == WM_T_80003)
   6173 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6174 	else
   6175 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6176 
   6177 	/* Writes the control register. */
   6178 	wm_set_vlan(sc);
   6179 
   6180 	if (sc->sc_flags & WM_F_HAS_MII) {
   6181 		uint16_t kmreg;
   6182 
   6183 		switch (sc->sc_type) {
   6184 		case WM_T_80003:
   6185 		case WM_T_ICH8:
   6186 		case WM_T_ICH9:
   6187 		case WM_T_ICH10:
   6188 		case WM_T_PCH:
   6189 		case WM_T_PCH2:
   6190 		case WM_T_PCH_LPT:
   6191 		case WM_T_PCH_SPT:
   6192 		case WM_T_PCH_CNP:
   6193 			/*
    6194 			 * Set the MAC to wait the maximum time between each
    6195 			 * iteration, and increase the maximum iterations used
    6196 			 * when polling the PHY; this fixes erroneous timeouts
    6197 			 * at 10Mbps.
   6198 			 */
   6199 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6200 			    0xFFFF);
   6201 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6202 			    &kmreg);
   6203 			kmreg |= 0x3F;
   6204 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6205 			    kmreg);
   6206 			break;
   6207 		default:
   6208 			break;
   6209 		}
   6210 
   6211 		if (sc->sc_type == WM_T_80003) {
   6212 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6213 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6214 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6215 
    6216 			/* Bypass the RX and TX FIFOs */
   6217 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6218 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6219 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6220 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6221 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6222 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6223 		}
   6224 	}
   6225 #if 0
   6226 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6227 #endif
   6228 
   6229 	/* Set up checksum offload parameters. */
   6230 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6231 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6232 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6233 		reg |= RXCSUM_IPOFL;
   6234 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6235 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6236 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6237 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6238 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6239 
   6240 	/* Set registers about MSI-X */
   6241 	if (wm_is_using_msix(sc)) {
   6242 		uint32_t ivar, qintr_idx;
   6243 		struct wm_queue *wmq;
   6244 		unsigned int qid;
   6245 
   6246 		if (sc->sc_type == WM_T_82575) {
   6247 			/* Interrupt control */
   6248 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6249 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6250 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6251 
   6252 			/* TX and RX */
   6253 			for (i = 0; i < sc->sc_nqueues; i++) {
   6254 				wmq = &sc->sc_queue[i];
   6255 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6256 				    EITR_TX_QUEUE(wmq->wmq_id)
   6257 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6258 			}
   6259 			/* Link status */
   6260 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6261 			    EITR_OTHER);
   6262 		} else if (sc->sc_type == WM_T_82574) {
   6263 			/* Interrupt control */
   6264 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6265 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6266 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6267 
   6268 			/*
    6269 			 * Work around an issue with spurious interrupts
    6270 			 * in MSI-X mode.
    6271 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6272 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   6273 			 */
   6274 			reg = CSR_READ(sc, WMREG_RFCTL);
   6275 			reg |= WMREG_RFCTL_ACKDIS;
   6276 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6277 
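         			/*
         			 * Map each queue's Tx and Rx interrupt causes to
         			 * its MSI-X vector through the IVAR register.
         			 */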
   6278 			ivar = 0;
   6279 			/* TX and RX */
   6280 			for (i = 0; i < sc->sc_nqueues; i++) {
   6281 				wmq = &sc->sc_queue[i];
   6282 				qid = wmq->wmq_id;
   6283 				qintr_idx = wmq->wmq_intr_idx;
   6284 
   6285 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6286 				    IVAR_TX_MASK_Q_82574(qid));
   6287 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6288 				    IVAR_RX_MASK_Q_82574(qid));
   6289 			}
   6290 			/* Link status */
   6291 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6292 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6293 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6294 		} else {
   6295 			/* Interrupt control */
   6296 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6297 			    | GPIE_EIAME | GPIE_PBA);
   6298 
   6299 			switch (sc->sc_type) {
   6300 			case WM_T_82580:
   6301 			case WM_T_I350:
   6302 			case WM_T_I354:
   6303 			case WM_T_I210:
   6304 			case WM_T_I211:
   6305 				/* TX and RX */
   6306 				for (i = 0; i < sc->sc_nqueues; i++) {
   6307 					wmq = &sc->sc_queue[i];
   6308 					qid = wmq->wmq_id;
   6309 					qintr_idx = wmq->wmq_intr_idx;
   6310 
   6311 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6312 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6313 					ivar |= __SHIFTIN((qintr_idx
   6314 						| IVAR_VALID),
   6315 					    IVAR_TX_MASK_Q(qid));
   6316 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6317 					ivar |= __SHIFTIN((qintr_idx
   6318 						| IVAR_VALID),
   6319 					    IVAR_RX_MASK_Q(qid));
   6320 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6321 				}
   6322 				break;
   6323 			case WM_T_82576:
   6324 				/* TX and RX */
   6325 				for (i = 0; i < sc->sc_nqueues; i++) {
   6326 					wmq = &sc->sc_queue[i];
   6327 					qid = wmq->wmq_id;
   6328 					qintr_idx = wmq->wmq_intr_idx;
   6329 
   6330 					ivar = CSR_READ(sc,
   6331 					    WMREG_IVAR_Q_82576(qid));
   6332 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6333 					ivar |= __SHIFTIN((qintr_idx
   6334 						| IVAR_VALID),
   6335 					    IVAR_TX_MASK_Q_82576(qid));
   6336 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6337 					ivar |= __SHIFTIN((qintr_idx
   6338 						| IVAR_VALID),
   6339 					    IVAR_RX_MASK_Q_82576(qid));
   6340 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6341 					    ivar);
   6342 				}
   6343 				break;
   6344 			default:
   6345 				break;
   6346 			}
   6347 
   6348 			/* Link status */
   6349 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6350 			    IVAR_MISC_OTHER);
   6351 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6352 		}
   6353 
   6354 		if (wm_is_using_multiqueue(sc)) {
   6355 			wm_init_rss(sc);
   6356 
   6357 			/*
    6358 			 * NOTE: Receive Full-Packet Checksum Offload is
    6359 			 * mutually exclusive with Multiqueue; however, this
    6360 			 * is not the same as TCP/IP checksum offload, which
    6361 			 * still works.
    6362 			 */
   6363 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6364 			reg |= RXCSUM_PCSD;
   6365 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6366 		}
   6367 	}
   6368 
   6369 	/* Set up the interrupt registers. */
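         	/* Writing all ones to IMC first masks every interrupt source. */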
   6370 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6371 
   6372 	/* Enable SFP module insertion interrupt if it's required */
   6373 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6374 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6375 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6376 		sfp_mask = ICR_GPI(0);
   6377 	}
   6378 
   6379 	if (wm_is_using_msix(sc)) {
   6380 		uint32_t mask;
   6381 		struct wm_queue *wmq;
   6382 
   6383 		switch (sc->sc_type) {
   6384 		case WM_T_82574:
   6385 			mask = 0;
   6386 			for (i = 0; i < sc->sc_nqueues; i++) {
   6387 				wmq = &sc->sc_queue[i];
   6388 				mask |= ICR_TXQ(wmq->wmq_id);
   6389 				mask |= ICR_RXQ(wmq->wmq_id);
   6390 			}
   6391 			mask |= ICR_OTHER;
   6392 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6393 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6394 			break;
   6395 		default:
   6396 			if (sc->sc_type == WM_T_82575) {
   6397 				mask = 0;
   6398 				for (i = 0; i < sc->sc_nqueues; i++) {
   6399 					wmq = &sc->sc_queue[i];
   6400 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6401 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6402 				}
   6403 				mask |= EITR_OTHER;
   6404 			} else {
   6405 				mask = 0;
   6406 				for (i = 0; i < sc->sc_nqueues; i++) {
   6407 					wmq = &sc->sc_queue[i];
   6408 					mask |= 1 << wmq->wmq_intr_idx;
   6409 				}
   6410 				mask |= 1 << sc->sc_link_intr_idx;
   6411 			}
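         			/*
         			 * EIAC selects the auto-cleared MSI-X vectors,
         			 * EIAM the auto-masked ones, and EIMS enables
         			 * them.
         			 */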
   6412 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6413 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6414 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6415 
   6416 			/* For other interrupts */
   6417 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6418 			break;
   6419 		}
   6420 	} else {
   6421 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6422 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6423 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6424 	}
   6425 
   6426 	/* Set up the inter-packet gap. */
   6427 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6428 
   6429 	if (sc->sc_type >= WM_T_82543) {
   6430 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6431 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6432 			wm_itrs_writereg(sc, wmq);
   6433 		}
   6434 		/*
    6435 		 * Link interrupts occur much less frequently than
    6436 		 * TX and RX interrupts, so we don't tune the
    6437 		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's
    6438 		 * if_igb does.
   6439 		 */
   6440 	}
   6441 
   6442 	/* Set the VLAN ethernetype. */
   6443 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6444 
   6445 	/*
   6446 	 * Set up the transmit control register; we start out with
    6447 	 * a collision distance suitable for FDX, but update it when
   6448 	 * we resolve the media type.
   6449 	 */
   6450 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6451 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6452 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6453 	if (sc->sc_type >= WM_T_82571)
   6454 		sc->sc_tctl |= TCTL_MULR;
   6455 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6456 
   6457 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6458 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6459 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6460 	}
   6461 
   6462 	if (sc->sc_type == WM_T_80003) {
   6463 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6464 		reg &= ~TCTL_EXT_GCEX_MASK;
   6465 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6466 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6467 	}
   6468 
   6469 	/* Set the media. */
   6470 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6471 		goto out;
   6472 
   6473 	/* Configure for OS presence */
   6474 	wm_init_manageability(sc);
   6475 
   6476 	/*
   6477 	 * Set up the receive control register; we actually program the
   6478 	 * register when we set the receive filter. Use multicast address
   6479 	 * offset type 0.
   6480 	 *
   6481 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6482 	 * don't enable that feature.
   6483 	 */
   6484 	sc->sc_mchash_type = 0;
   6485 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6486 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6487 
    6488 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6489 	if (sc->sc_type == WM_T_82574)
   6490 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6491 
   6492 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6493 		sc->sc_rctl |= RCTL_SECRC;
   6494 
   6495 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6496 	    && (ifp->if_mtu > ETHERMTU)) {
   6497 		sc->sc_rctl |= RCTL_LPE;
   6498 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6499 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6500 	}
   6501 
   6502 	if (MCLBYTES == 2048)
   6503 		sc->sc_rctl |= RCTL_2k;
   6504 	else {
   6505 		if (sc->sc_type >= WM_T_82543) {
   6506 			switch (MCLBYTES) {
   6507 			case 4096:
   6508 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6509 				break;
   6510 			case 8192:
   6511 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6512 				break;
   6513 			case 16384:
   6514 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6515 				break;
   6516 			default:
   6517 				panic("wm_init: MCLBYTES %d unsupported",
   6518 				    MCLBYTES);
   6519 				break;
   6520 			}
   6521 		} else
   6522 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6523 	}
   6524 
   6525 	/* Enable ECC */
   6526 	switch (sc->sc_type) {
   6527 	case WM_T_82571:
   6528 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6529 		reg |= PBA_ECC_CORR_EN;
   6530 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6531 		break;
   6532 	case WM_T_PCH_LPT:
   6533 	case WM_T_PCH_SPT:
   6534 	case WM_T_PCH_CNP:
   6535 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6536 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6537 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6538 
   6539 		sc->sc_ctrl |= CTRL_MEHE;
   6540 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6541 		break;
   6542 	default:
   6543 		break;
   6544 	}
   6545 
   6546 	/*
   6547 	 * Set the receive filter.
   6548 	 *
   6549 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6550 	 * the setting of RCTL.EN in wm_set_filter()
   6551 	 */
   6552 	wm_set_filter(sc);
   6553 
    6554 	/* On the 82575 and later, set RDT only if RX is enabled. */
   6555 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6556 		int qidx;
   6557 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6558 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6559 			for (i = 0; i < WM_NRXDESC; i++) {
   6560 				mutex_enter(rxq->rxq_lock);
   6561 				wm_init_rxdesc(rxq, i);
   6562 				mutex_exit(rxq->rxq_lock);
   6564 			}
   6565 		}
   6566 	}
   6567 
   6568 	wm_unset_stopping_flags(sc);
   6569 
   6570 	/* Start the one second link check clock. */
   6571 	callout_schedule(&sc->sc_tick_ch, hz);
   6572 
   6573 	/* ...all done! */
   6574 	ifp->if_flags |= IFF_RUNNING;
   6575 
   6576  out:
   6577 	/* Save last flags for the callback */
   6578 	sc->sc_if_flags = ifp->if_flags;
   6579 	sc->sc_ec_capenable = ec->ec_capenable;
   6580 	if (error)
   6581 		log(LOG_ERR, "%s: interface not running\n",
   6582 		    device_xname(sc->sc_dev));
   6583 	return error;
   6584 }
   6585 
   6586 /*
   6587  * wm_stop:		[ifnet interface function]
   6588  *
   6589  *	Stop transmission on the interface.
   6590  */
   6591 static void
   6592 wm_stop(struct ifnet *ifp, int disable)
   6593 {
   6594 	struct wm_softc *sc = ifp->if_softc;
   6595 
   6596 	ASSERT_SLEEPABLE();
   6597 
   6598 	WM_CORE_LOCK(sc);
   6599 	wm_stop_locked(ifp, disable ? true : false, true);
   6600 	WM_CORE_UNLOCK(sc);
   6601 
   6602 	/*
    6603 	 * After wm_set_stopping_flags(), it is guaranteed that
    6604 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6605 	 * However, workqueue_wait() cannot be called in
    6606 	 * wm_stop_locked() because it can sleep,
    6607 	 * so call workqueue_wait() here.
   6608 	 */
   6609 	for (int i = 0; i < sc->sc_nqueues; i++)
   6610 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6611 }
   6612 
   6613 static void
   6614 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6615 {
   6616 	struct wm_softc *sc = ifp->if_softc;
   6617 	struct wm_txsoft *txs;
   6618 	int i, qidx;
   6619 
   6620 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6621 		device_xname(sc->sc_dev), __func__));
   6622 	KASSERT(WM_CORE_LOCKED(sc));
   6623 
   6624 	wm_set_stopping_flags(sc);
   6625 
   6626 	if (sc->sc_flags & WM_F_HAS_MII) {
   6627 		/* Down the MII. */
   6628 		mii_down(&sc->sc_mii);
   6629 	} else {
   6630 #if 0
   6631 		/* Should we clear PHY's status properly? */
   6632 		wm_reset(sc);
   6633 #endif
   6634 	}
   6635 
   6636 	/* Stop the transmit and receive processes. */
   6637 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6638 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6639 	sc->sc_rctl &= ~RCTL_EN;
   6640 
   6641 	/*
   6642 	 * Clear the interrupt mask to ensure the device cannot assert its
   6643 	 * interrupt line.
   6644 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6645 	 * service any currently pending or shared interrupt.
   6646 	 */
   6647 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6648 	sc->sc_icr = 0;
   6649 	if (wm_is_using_msix(sc)) {
   6650 		if (sc->sc_type != WM_T_82574) {
   6651 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6652 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6653 		} else
   6654 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6655 	}
   6656 
   6657 	/*
   6658 	 * Stop callouts after interrupts are disabled; if we have
   6659 	 * to wait for them, we will be releasing the CORE_LOCK
   6660 	 * briefly, which will unblock interrupts on the current CPU.
   6661 	 */
   6662 
   6663 	/* Stop the one second clock. */
   6664 	if (wait)
   6665 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6666 	else
   6667 		callout_stop(&sc->sc_tick_ch);
   6668 
   6669 	/* Stop the 82547 Tx FIFO stall check timer. */
   6670 	if (sc->sc_type == WM_T_82547) {
   6671 		if (wait)
   6672 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6673 		else
   6674 			callout_stop(&sc->sc_txfifo_ch);
   6675 	}
   6676 
   6677 	/* Release any queued transmit buffers. */
   6678 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6679 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6680 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6681 		struct mbuf *m;
   6682 
   6683 		mutex_enter(txq->txq_lock);
   6684 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6685 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6686 			txs = &txq->txq_soft[i];
   6687 			if (txs->txs_mbuf != NULL) {
    6688 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6689 				m_freem(txs->txs_mbuf);
   6690 				txs->txs_mbuf = NULL;
   6691 			}
   6692 		}
   6693 		/* Drain txq_interq */
   6694 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6695 			m_freem(m);
   6696 		mutex_exit(txq->txq_lock);
   6697 	}
   6698 
   6699 	/* Mark the interface as down and cancel the watchdog timer. */
   6700 	ifp->if_flags &= ~IFF_RUNNING;
   6701 
   6702 	if (disable) {
   6703 		for (i = 0; i < sc->sc_nqueues; i++) {
   6704 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6705 			mutex_enter(rxq->rxq_lock);
   6706 			wm_rxdrain(rxq);
   6707 			mutex_exit(rxq->rxq_lock);
   6708 		}
   6709 	}
   6710 
   6711 #if 0 /* notyet */
   6712 	if (sc->sc_type >= WM_T_82544)
   6713 		CSR_WRITE(sc, WMREG_WUC, 0);
   6714 #endif
   6715 }
   6716 
   6717 static void
   6718 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6719 {
   6720 	struct mbuf *m;
   6721 	int i;
   6722 
   6723 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6724 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6725 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6726 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6727 		    m->m_data, m->m_len, m->m_flags);
   6728 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6729 	    i, i == 1 ? "" : "s");
   6730 }
   6731 
   6732 /*
   6733  * wm_82547_txfifo_stall:
   6734  *
   6735  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6736  *	reset the FIFO pointers, and restart packet transmission.
   6737  */
   6738 static void
   6739 wm_82547_txfifo_stall(void *arg)
   6740 {
   6741 	struct wm_softc *sc = arg;
   6742 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6743 
   6744 	mutex_enter(txq->txq_lock);
   6745 
   6746 	if (txq->txq_stopping)
   6747 		goto out;
   6748 
   6749 	if (txq->txq_fifo_stall) {
   6750 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6751 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6752 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6753 			/*
   6754 			 * Packets have drained.  Stop transmitter, reset
   6755 			 * FIFO pointers, restart transmitter, and kick
   6756 			 * the packet queue.
   6757 			 */
   6758 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6759 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6760 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6761 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6762 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6763 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6764 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6765 			CSR_WRITE_FLUSH(sc);
   6766 
   6767 			txq->txq_fifo_head = 0;
   6768 			txq->txq_fifo_stall = 0;
   6769 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6770 		} else {
   6771 			/*
   6772 			 * Still waiting for packets to drain; try again in
   6773 			 * another tick.
   6774 			 */
   6775 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6776 		}
   6777 	}
   6778 
   6779 out:
   6780 	mutex_exit(txq->txq_lock);
   6781 }
   6782 
   6783 /*
   6784  * wm_82547_txfifo_bugchk:
   6785  *
    6786  *	Check for the bug condition in the 82547 Tx FIFO.  We need to
    6787  *	prevent enqueueing a packet that would wrap around the end
    6788  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6789  *
   6790  *	We do this by checking the amount of space before the end
   6791  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6792  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6793  *	the internal FIFO pointers to the beginning, and restart
   6794  *	transmission on the interface.
   6795  */
   6796 #define	WM_FIFO_HDR		0x10
   6797 #define	WM_82547_PAD_LEN	0x3e0
   6798 static int
   6799 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6800 {
   6801 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6802 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6803 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
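         	/*
         	 * For example, a 1514-byte frame consumes
         	 * roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO space,
         	 * including the FIFO header.
         	 */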
   6804 
   6805 	/* Just return if already stalled. */
   6806 	if (txq->txq_fifo_stall)
   6807 		return 1;
   6808 
   6809 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6810 		/* Stall only occurs in half-duplex mode. */
   6811 		goto send_packet;
   6812 	}
   6813 
   6814 	if (len >= WM_82547_PAD_LEN + space) {
   6815 		txq->txq_fifo_stall = 1;
   6816 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6817 		return 1;
   6818 	}
   6819 
   6820  send_packet:
   6821 	txq->txq_fifo_head += len;
   6822 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6823 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6824 
   6825 	return 0;
   6826 }
   6827 
   6828 static int
   6829 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6830 {
   6831 	int error;
   6832 
   6833 	/*
   6834 	 * Allocate the control data structures, and create and load the
   6835 	 * DMA map for it.
   6836 	 *
   6837 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6838 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6839 	 * both sets within the same 4G segment.
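         	 * The 4G constraint is kept by passing 0x100000000 as the
         	 * boundary argument to bus_dmamem_alloc() below, so an
         	 * allocation never crosses a 4GB boundary.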
   6840 	 */
   6841 	if (sc->sc_type < WM_T_82544)
   6842 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6843 	else
   6844 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6845 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6846 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6847 	else
   6848 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6849 
   6850 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6851 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6852 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6853 		aprint_error_dev(sc->sc_dev,
   6854 		    "unable to allocate TX control data, error = %d\n",
   6855 		    error);
   6856 		goto fail_0;
   6857 	}
   6858 
   6859 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6860 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6861 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6862 		aprint_error_dev(sc->sc_dev,
   6863 		    "unable to map TX control data, error = %d\n", error);
   6864 		goto fail_1;
   6865 	}
   6866 
   6867 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6868 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6869 		aprint_error_dev(sc->sc_dev,
   6870 		    "unable to create TX control data DMA map, error = %d\n",
   6871 		    error);
   6872 		goto fail_2;
   6873 	}
   6874 
   6875 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6876 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6877 		aprint_error_dev(sc->sc_dev,
   6878 		    "unable to load TX control data DMA map, error = %d\n",
   6879 		    error);
   6880 		goto fail_3;
   6881 	}
   6882 
   6883 	return 0;
   6884 
   6885  fail_3:
   6886 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6887  fail_2:
   6888 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6889 	    WM_TXDESCS_SIZE(txq));
   6890  fail_1:
   6891 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6892  fail_0:
   6893 	return error;
   6894 }
   6895 
   6896 static void
   6897 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6898 {
   6899 
   6900 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6901 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6902 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6903 	    WM_TXDESCS_SIZE(txq));
   6904 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6905 }
   6906 
   6907 static int
   6908 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6909 {
   6910 	int error;
   6911 	size_t rxq_descs_size;
   6912 
   6913 	/*
   6914 	 * Allocate the control data structures, and create and load the
   6915 	 * DMA map for it.
   6916 	 *
   6917 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6918 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6919 	 * both sets within the same 4G segment.
   6920 	 */
   6921 	rxq->rxq_ndesc = WM_NRXDESC;
   6922 	if (sc->sc_type == WM_T_82574)
   6923 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6924 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6925 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6926 	else
   6927 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6928 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6929 
   6930 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6931 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6932 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6933 		aprint_error_dev(sc->sc_dev,
   6934 		    "unable to allocate RX control data, error = %d\n",
   6935 		    error);
   6936 		goto fail_0;
   6937 	}
   6938 
   6939 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6940 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6941 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6942 		aprint_error_dev(sc->sc_dev,
   6943 		    "unable to map RX control data, error = %d\n", error);
   6944 		goto fail_1;
   6945 	}
   6946 
   6947 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6948 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6949 		aprint_error_dev(sc->sc_dev,
   6950 		    "unable to create RX control data DMA map, error = %d\n",
   6951 		    error);
   6952 		goto fail_2;
   6953 	}
   6954 
   6955 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6956 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6957 		aprint_error_dev(sc->sc_dev,
   6958 		    "unable to load RX control data DMA map, error = %d\n",
   6959 		    error);
   6960 		goto fail_3;
   6961 	}
   6962 
   6963 	return 0;
   6964 
   6965  fail_3:
   6966 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6967  fail_2:
   6968 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6969 	    rxq_descs_size);
   6970  fail_1:
   6971 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6972  fail_0:
   6973 	return error;
   6974 }
   6975 
   6976 static void
   6977 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6978 {
   6979 
   6980 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6981 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6982 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6983 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6984 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6985 }
   6986 
   6987 
   6988 static int
   6989 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6990 {
   6991 	int i, error;
   6992 
   6993 	/* Create the transmit buffer DMA maps. */
   6994 	WM_TXQUEUELEN(txq) =
   6995 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6996 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6997 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6998 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6999 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7000 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7001 			aprint_error_dev(sc->sc_dev,
   7002 			    "unable to create Tx DMA map %d, error = %d\n",
   7003 			    i, error);
   7004 			goto fail;
   7005 		}
   7006 	}
   7007 
   7008 	return 0;
   7009 
   7010  fail:
   7011 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7012 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7013 			bus_dmamap_destroy(sc->sc_dmat,
   7014 			    txq->txq_soft[i].txs_dmamap);
   7015 	}
   7016 	return error;
   7017 }
   7018 
   7019 static void
   7020 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7021 {
   7022 	int i;
   7023 
   7024 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7025 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7026 			bus_dmamap_destroy(sc->sc_dmat,
   7027 			    txq->txq_soft[i].txs_dmamap);
   7028 	}
   7029 }
   7030 
   7031 static int
   7032 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7033 {
   7034 	int i, error;
   7035 
   7036 	/* Create the receive buffer DMA maps. */
   7037 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7038 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7039 			    MCLBYTES, 0, 0,
   7040 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7041 			aprint_error_dev(sc->sc_dev,
   7042 			    "unable to create Rx DMA map %d error = %d\n",
   7043 			    i, error);
   7044 			goto fail;
   7045 		}
   7046 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7047 	}
   7048 
   7049 	return 0;
   7050 
   7051  fail:
   7052 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7053 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7054 			bus_dmamap_destroy(sc->sc_dmat,
   7055 			    rxq->rxq_soft[i].rxs_dmamap);
   7056 	}
   7057 	return error;
   7058 }
   7059 
   7060 static void
   7061 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7062 {
   7063 	int i;
   7064 
   7065 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7066 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7067 			bus_dmamap_destroy(sc->sc_dmat,
   7068 			    rxq->rxq_soft[i].rxs_dmamap);
   7069 	}
   7070 }
   7071 
   7072 /*
    7073  * wm_alloc_txrx_queues:
    7074  *	Allocate {Tx,Rx} descriptors and {Tx,Rx} buffers.
   7075  */
   7076 static int
   7077 wm_alloc_txrx_queues(struct wm_softc *sc)
   7078 {
   7079 	int i, error, tx_done, rx_done;
   7080 
   7081 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7082 	    KM_SLEEP);
   7083 	if (sc->sc_queue == NULL) {
    7084 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7085 		error = ENOMEM;
   7086 		goto fail_0;
   7087 	}
   7088 
   7089 	/* For transmission */
   7090 	error = 0;
   7091 	tx_done = 0;
   7092 	for (i = 0; i < sc->sc_nqueues; i++) {
   7093 #ifdef WM_EVENT_COUNTERS
   7094 		int j;
   7095 		const char *xname;
   7096 #endif
   7097 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7098 		txq->txq_sc = sc;
   7099 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7100 
   7101 		error = wm_alloc_tx_descs(sc, txq);
   7102 		if (error)
   7103 			break;
   7104 		error = wm_alloc_tx_buffer(sc, txq);
   7105 		if (error) {
   7106 			wm_free_tx_descs(sc, txq);
   7107 			break;
   7108 		}
   7109 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7110 		if (txq->txq_interq == NULL) {
   7111 			wm_free_tx_descs(sc, txq);
   7112 			wm_free_tx_buffer(sc, txq);
   7113 			error = ENOMEM;
   7114 			break;
   7115 		}
   7116 
   7117 #ifdef WM_EVENT_COUNTERS
   7118 		xname = device_xname(sc->sc_dev);
   7119 
   7120 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7121 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7122 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7123 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7124 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7125 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7126 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7127 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7128 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7129 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7130 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7131 
   7132 		for (j = 0; j < WM_NTXSEGS; j++) {
   7133 			snprintf(txq->txq_txseg_evcnt_names[j],
   7134 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   7135 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   7136 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7137 		}
   7138 
   7139 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7140 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7141 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7142 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7143 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7144 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7145 #endif /* WM_EVENT_COUNTERS */
   7146 
   7147 		tx_done++;
   7148 	}
   7149 	if (error)
   7150 		goto fail_1;
   7151 
   7152 	/* For receive */
   7153 	error = 0;
   7154 	rx_done = 0;
   7155 	for (i = 0; i < sc->sc_nqueues; i++) {
   7156 #ifdef WM_EVENT_COUNTERS
   7157 		const char *xname;
   7158 #endif
   7159 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7160 		rxq->rxq_sc = sc;
   7161 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7162 
   7163 		error = wm_alloc_rx_descs(sc, rxq);
   7164 		if (error)
   7165 			break;
   7166 
   7167 		error = wm_alloc_rx_buffer(sc, rxq);
   7168 		if (error) {
   7169 			wm_free_rx_descs(sc, rxq);
   7170 			break;
   7171 		}
   7172 
   7173 #ifdef WM_EVENT_COUNTERS
   7174 		xname = device_xname(sc->sc_dev);
   7175 
   7176 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7177 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7178 
   7179 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7180 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7181 #endif /* WM_EVENT_COUNTERS */
   7182 
   7183 		rx_done++;
   7184 	}
   7185 	if (error)
   7186 		goto fail_2;
   7187 
   7188 	return 0;
   7189 
   7190  fail_2:
   7191 	for (i = 0; i < rx_done; i++) {
   7192 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7193 		wm_free_rx_buffer(sc, rxq);
   7194 		wm_free_rx_descs(sc, rxq);
   7195 		if (rxq->rxq_lock)
   7196 			mutex_obj_free(rxq->rxq_lock);
   7197 	}
   7198  fail_1:
   7199 	for (i = 0; i < tx_done; i++) {
   7200 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7201 		pcq_destroy(txq->txq_interq);
   7202 		wm_free_tx_buffer(sc, txq);
   7203 		wm_free_tx_descs(sc, txq);
   7204 		if (txq->txq_lock)
   7205 			mutex_obj_free(txq->txq_lock);
   7206 	}
   7207 
   7208 	kmem_free(sc->sc_queue,
   7209 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7210  fail_0:
   7211 	return error;
   7212 }
   7213 
   7214 /*
    7215  * wm_free_txrx_queues:
    7216  *	Free {Tx,Rx} descriptors and {Tx,Rx} buffers.
   7217  */
   7218 static void
   7219 wm_free_txrx_queues(struct wm_softc *sc)
   7220 {
   7221 	int i;
   7222 
   7223 	for (i = 0; i < sc->sc_nqueues; i++) {
   7224 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7225 
   7226 #ifdef WM_EVENT_COUNTERS
   7227 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7228 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7229 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7230 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7231 #endif /* WM_EVENT_COUNTERS */
   7232 
   7233 		wm_free_rx_buffer(sc, rxq);
   7234 		wm_free_rx_descs(sc, rxq);
   7235 		if (rxq->rxq_lock)
   7236 			mutex_obj_free(rxq->rxq_lock);
   7237 	}
   7238 
   7239 	for (i = 0; i < sc->sc_nqueues; i++) {
   7240 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7241 		struct mbuf *m;
   7242 #ifdef WM_EVENT_COUNTERS
   7243 		int j;
   7244 
   7245 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7246 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7247 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7248 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7249 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7250 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7251 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7252 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7253 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7254 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7255 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7256 
   7257 		for (j = 0; j < WM_NTXSEGS; j++)
   7258 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7259 
   7260 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7261 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7262 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7263 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7264 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7265 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7266 #endif /* WM_EVENT_COUNTERS */
   7267 
   7268 		/* Drain txq_interq */
   7269 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7270 			m_freem(m);
   7271 		pcq_destroy(txq->txq_interq);
   7272 
   7273 		wm_free_tx_buffer(sc, txq);
   7274 		wm_free_tx_descs(sc, txq);
   7275 		if (txq->txq_lock)
   7276 			mutex_obj_free(txq->txq_lock);
   7277 	}
   7278 
   7279 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7280 }
   7281 
   7282 static void
   7283 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7284 {
   7285 
   7286 	KASSERT(mutex_owned(txq->txq_lock));
   7287 
   7288 	/* Initialize the transmit descriptor ring. */
   7289 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7290 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7291 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7292 	txq->txq_free = WM_NTXDESC(txq);
   7293 	txq->txq_next = 0;
   7294 }
   7295 
   7296 static void
   7297 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7298     struct wm_txqueue *txq)
   7299 {
   7300 
   7301 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7302 		device_xname(sc->sc_dev), __func__));
   7303 	KASSERT(mutex_owned(txq->txq_lock));
   7304 
   7305 	if (sc->sc_type < WM_T_82543) {
   7306 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7307 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7308 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7309 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7310 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7311 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7312 	} else {
   7313 		int qid = wmq->wmq_id;
   7314 
   7315 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7316 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7317 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7318 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7319 
   7320 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7321 			/*
   7322 			 * Don't write TDT before TCTL.EN is set.
    7323 			 * See the documentation.
   7324 			 */
   7325 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7326 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7327 			    | TXDCTL_WTHRESH(0));
   7328 		else {
   7329 			/* XXX should update with AIM? */
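         			/*
         			 * TIDV/TADV tick in 1024ns units while the ITR
         			 * value is in 256ns units; dividing by 4 keeps
         			 * the same absolute time ("simple timer"
         			 * behavior).
         			 */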
   7330 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7331 			if (sc->sc_type >= WM_T_82540) {
   7332 				/* Should be the same */
   7333 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7334 			}
   7335 
   7336 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7337 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7338 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7339 		}
   7340 	}
   7341 }
   7342 
   7343 static void
   7344 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7345 {
   7346 	int i;
   7347 
   7348 	KASSERT(mutex_owned(txq->txq_lock));
   7349 
   7350 	/* Initialize the transmit job descriptors. */
   7351 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7352 		txq->txq_soft[i].txs_mbuf = NULL;
   7353 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7354 	txq->txq_snext = 0;
   7355 	txq->txq_sdirty = 0;
   7356 }
   7357 
   7358 static void
   7359 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7360     struct wm_txqueue *txq)
   7361 {
   7362 
   7363 	KASSERT(mutex_owned(txq->txq_lock));
   7364 
   7365 	/*
   7366 	 * Set up some register offsets that are different between
   7367 	 * the i82542 and the i82543 and later chips.
   7368 	 */
   7369 	if (sc->sc_type < WM_T_82543)
   7370 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7371 	else
   7372 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7373 
   7374 	wm_init_tx_descs(sc, txq);
   7375 	wm_init_tx_regs(sc, wmq, txq);
   7376 	wm_init_tx_buffer(sc, txq);
   7377 
   7378 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7379 	txq->txq_sending = false;
   7380 }
   7381 
   7382 static void
   7383 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7384     struct wm_rxqueue *rxq)
   7385 {
   7386 
   7387 	KASSERT(mutex_owned(rxq->rxq_lock));
   7388 
   7389 	/*
   7390 	 * Initialize the receive descriptor and receive job
   7391 	 * descriptor rings.
   7392 	 */
   7393 	if (sc->sc_type < WM_T_82543) {
   7394 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7395 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7396 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7397 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7398 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7399 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7400 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7401 
   7402 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7403 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7404 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7405 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7406 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7407 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7408 	} else {
   7409 		int qid = wmq->wmq_id;
   7410 
   7411 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7412 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7413 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7414 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7415 
   7416 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7417 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7418 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7419 
    7420 			/* Currently, we support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   7421 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7422 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7423 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7424 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7425 			    | RXDCTL_WTHRESH(1));
   7426 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7427 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7428 		} else {
   7429 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7430 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7431 			/* XXX should update with AIM? */
   7432 			CSR_WRITE(sc, WMREG_RDTR,
   7433 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7434 			/* MUST be the same */
   7435 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7436 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7437 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7438 		}
   7439 	}
   7440 }
   7441 
   7442 static int
   7443 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7444 {
   7445 	struct wm_rxsoft *rxs;
   7446 	int error, i;
   7447 
   7448 	KASSERT(mutex_owned(rxq->rxq_lock));
   7449 
   7450 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7451 		rxs = &rxq->rxq_soft[i];
   7452 		if (rxs->rxs_mbuf == NULL) {
   7453 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7454 				log(LOG_ERR, "%s: unable to allocate or map "
   7455 				    "rx buffer %d, error = %d\n",
   7456 				    device_xname(sc->sc_dev), i, error);
   7457 				/*
   7458 				 * XXX Should attempt to run with fewer receive
   7459 				 * XXX buffers instead of just failing.
   7460 				 */
   7461 				wm_rxdrain(rxq);
   7462 				return ENOMEM;
   7463 			}
   7464 		} else {
   7465 			/*
   7466 			 * For 82575 and 82576, the RX descriptors must be
   7467 			 * initialized after the setting of RCTL.EN in
   7468 			 * wm_set_filter()
   7469 			 */
   7470 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7471 				wm_init_rxdesc(rxq, i);
   7472 		}
   7473 	}
   7474 	rxq->rxq_ptr = 0;
   7475 	rxq->rxq_discard = 0;
   7476 	WM_RXCHAIN_RESET(rxq);
   7477 
   7478 	return 0;
   7479 }
   7480 
   7481 static int
   7482 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7483     struct wm_rxqueue *rxq)
   7484 {
   7485 
   7486 	KASSERT(mutex_owned(rxq->rxq_lock));
   7487 
   7488 	/*
   7489 	 * Set up some register offsets that are different between
   7490 	 * the i82542 and the i82543 and later chips.
   7491 	 */
   7492 	if (sc->sc_type < WM_T_82543)
   7493 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7494 	else
   7495 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7496 
   7497 	wm_init_rx_regs(sc, wmq, rxq);
   7498 	return wm_init_rx_buffer(sc, rxq);
   7499 }
   7500 
   7501 /*
    7502  * wm_init_txrx_queues:
    7503  *	Initialize {Tx,Rx} descriptors and {Tx,Rx} buffers.
   7504  */
   7505 static int
   7506 wm_init_txrx_queues(struct wm_softc *sc)
   7507 {
   7508 	int i, error = 0;
   7509 
   7510 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7511 		device_xname(sc->sc_dev), __func__));
   7512 
   7513 	for (i = 0; i < sc->sc_nqueues; i++) {
   7514 		struct wm_queue *wmq = &sc->sc_queue[i];
   7515 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7516 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7517 
   7518 		/*
   7519 		 * TODO
    7520 		 * Currently, we use a constant value instead of AIM
    7521 		 * (adaptive interrupt moderation). Furthermore, the interrupt
    7522 		 * interval of multiqueue (which uses polling mode) is lower
    7523 		 * than the default value. More tuning and AIM are required.
   7524 		 */
   7525 		if (wm_is_using_multiqueue(sc))
   7526 			wmq->wmq_itr = 50;
   7527 		else
   7528 			wmq->wmq_itr = sc->sc_itr_init;
   7529 		wmq->wmq_set_itr = true;
   7530 
   7531 		mutex_enter(txq->txq_lock);
   7532 		wm_init_tx_queue(sc, wmq, txq);
   7533 		mutex_exit(txq->txq_lock);
   7534 
   7535 		mutex_enter(rxq->rxq_lock);
   7536 		error = wm_init_rx_queue(sc, wmq, rxq);
   7537 		mutex_exit(rxq->rxq_lock);
   7538 		if (error)
   7539 			break;
   7540 	}
   7541 
   7542 	return error;
   7543 }
   7544 
   7545 /*
   7546  * wm_tx_offload:
   7547  *
   7548  *	Set up TCP/IP checksumming parameters for the
   7549  *	specified packet.
   7550  */
   7551 static void
   7552 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7553     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7554 {
   7555 	struct mbuf *m0 = txs->txs_mbuf;
   7556 	struct livengood_tcpip_ctxdesc *t;
   7557 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7558 	uint32_t ipcse;
   7559 	struct ether_header *eh;
   7560 	int offset, iphl;
   7561 	uint8_t fields;
   7562 
   7563 	/*
   7564 	 * XXX It would be nice if the mbuf pkthdr had offset
   7565 	 * fields for the protocol headers.
   7566 	 */
   7567 
   7568 	eh = mtod(m0, struct ether_header *);
   7569 	switch (htons(eh->ether_type)) {
   7570 	case ETHERTYPE_IP:
   7571 	case ETHERTYPE_IPV6:
   7572 		offset = ETHER_HDR_LEN;
   7573 		break;
   7574 
   7575 	case ETHERTYPE_VLAN:
   7576 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7577 		break;
   7578 
   7579 	default:
   7580 		/* Don't support this protocol or encapsulation. */
   7581 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7582 		txq->txq_last_hw_ipcs = 0;
   7583 		txq->txq_last_hw_tucs = 0;
   7584 		*fieldsp = 0;
   7585 		*cmdp = 0;
   7586 		return;
   7587 	}
   7588 
   7589 	if ((m0->m_pkthdr.csum_flags &
   7590 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7591 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7592 	} else
   7593 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7594 
   7595 	ipcse = offset + iphl - 1;
   7596 
   7597 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7598 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7599 	seg = 0;
   7600 	fields = 0;
   7601 
   7602 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7603 		int hlen = offset + iphl;
   7604 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7605 
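         		/*
         		 * For TSO we zero the IP length field and seed the
         		 * TCP checksum with a pseudo-header sum that excludes
         		 * the length; the hardware then fills in the lengths
         		 * and checksums of each segment it emits.
         		 */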
   7606 		if (__predict_false(m0->m_len <
   7607 				    (hlen + sizeof(struct tcphdr)))) {
   7608 			/*
   7609 			 * TCP/IP headers are not in the first mbuf; we need
   7610 			 * to do this the slow and painful way. Let's just
   7611 			 * hope this doesn't happen very often.
   7612 			 */
   7613 			struct tcphdr th;
   7614 
   7615 			WM_Q_EVCNT_INCR(txq, tsopain);
   7616 
   7617 			m_copydata(m0, hlen, sizeof(th), &th);
   7618 			if (v4) {
   7619 				struct ip ip;
   7620 
   7621 				m_copydata(m0, offset, sizeof(ip), &ip);
   7622 				ip.ip_len = 0;
   7623 				m_copyback(m0,
   7624 				    offset + offsetof(struct ip, ip_len),
   7625 				    sizeof(ip.ip_len), &ip.ip_len);
   7626 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7627 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7628 			} else {
   7629 				struct ip6_hdr ip6;
   7630 
   7631 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7632 				ip6.ip6_plen = 0;
   7633 				m_copyback(m0,
   7634 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7635 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7636 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7637 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7638 			}
   7639 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7640 			    sizeof(th.th_sum), &th.th_sum);
   7641 
   7642 			hlen += th.th_off << 2;
   7643 		} else {
   7644 			/*
   7645 			 * TCP/IP headers are in the first mbuf; we can do
   7646 			 * this the easy way.
   7647 			 */
   7648 			struct tcphdr *th;
   7649 
   7650 			if (v4) {
   7651 				struct ip *ip =
   7652 				    (void *)(mtod(m0, char *) + offset);
   7653 				th = (void *)(mtod(m0, char *) + hlen);
   7654 
   7655 				ip->ip_len = 0;
   7656 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7657 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7658 			} else {
   7659 				struct ip6_hdr *ip6 =
   7660 				    (void *)(mtod(m0, char *) + offset);
   7661 				th = (void *)(mtod(m0, char *) + hlen);
   7662 
   7663 				ip6->ip6_plen = 0;
   7664 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7665 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7666 			}
   7667 			hlen += th->th_off << 2;
   7668 		}
   7669 
   7670 		if (v4) {
   7671 			WM_Q_EVCNT_INCR(txq, tso);
   7672 			cmdlen |= WTX_TCPIP_CMD_IP;
   7673 		} else {
   7674 			WM_Q_EVCNT_INCR(txq, tso6);
   7675 			ipcse = 0;
   7676 		}
   7677 		cmd |= WTX_TCPIP_CMD_TSE;
   7678 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7679 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7680 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7681 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7682 	}
   7683 
   7684 	/*
   7685 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7686 	 * offload feature, if we load the context descriptor, we
   7687 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7688 	 */
   7689 
   7690 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7691 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7692 	    WTX_TCPIP_IPCSE(ipcse);
   7693 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7694 		WM_Q_EVCNT_INCR(txq, ipsum);
   7695 		fields |= WTX_IXSM;
   7696 	}
   7697 
   7698 	offset += iphl;
   7699 
   7700 	if (m0->m_pkthdr.csum_flags &
   7701 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7702 		WM_Q_EVCNT_INCR(txq, tusum);
   7703 		fields |= WTX_TXSM;
   7704 		tucs = WTX_TCPIP_TUCSS(offset) |
   7705 		    WTX_TCPIP_TUCSO(offset +
   7706 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7707 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7708 	} else if ((m0->m_pkthdr.csum_flags &
   7709 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7710 		WM_Q_EVCNT_INCR(txq, tusum6);
   7711 		fields |= WTX_TXSM;
   7712 		tucs = WTX_TCPIP_TUCSS(offset) |
   7713 		    WTX_TCPIP_TUCSO(offset +
   7714 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7715 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7716 	} else {
   7717 		/* Just initialize it to a valid TCP context. */
   7718 		tucs = WTX_TCPIP_TUCSS(offset) |
   7719 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7720 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7721 	}
   7722 
   7723 	*cmdp = cmd;
   7724 	*fieldsp = fields;
   7725 
   7726 	/*
    7727 	 * We don't have to write a context descriptor for every packet,
    7728 	 * except on the 82574. On the 82574, we must write a context
    7729 	 * descriptor for every packet when we use two descriptor queues.
    7730 	 *
    7731 	 * The 82574L can only remember the *last* context used,
    7732 	 * regardless of the queue it was used for.  We cannot reuse
   7733 	 * contexts on this hardware platform and must generate a new
   7734 	 * context every time.  82574L hardware spec, section 7.2.6,
   7735 	 * second note.
   7736 	 */
   7737 	if (sc->sc_nqueues < 2) {
   7738 		/*
    7739 		 * Setting up a new checksum offload context for every
    7740 		 * frame takes a lot of processing time in hardware.
    7741 		 * It also reduces performance a lot for small-sized
    7742 		 * frames, so avoid it if the driver can reuse a
    7743 		 * previously configured checksum offload context.
    7744 		 * For TSO, in theory we could reuse a TSO context only
    7745 		 * if the frame has the same type (IP/TCP) and the same
    7746 		 * MSS. However, checking whether a frame has the same
    7747 		 * IP/TCP structure is hard, so just ignore that and
    7748 		 * always establish a new TSO context.
   7749 		 */
   7750 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7751 		    == 0) {
   7752 			if (txq->txq_last_hw_cmd == cmd &&
   7753 			    txq->txq_last_hw_fields == fields &&
   7754 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7755 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7756 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7757 				return;
   7758 			}
   7759 		}
   7760 
   7761 		txq->txq_last_hw_cmd = cmd;
   7762 		txq->txq_last_hw_fields = fields;
   7763 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7764 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7765 	}
   7766 
   7767 	/* Fill in the context descriptor. */
   7768 	t = (struct livengood_tcpip_ctxdesc *)
   7769 	    &txq->txq_descs[txq->txq_next];
   7770 	t->tcpip_ipcs = htole32(ipcs);
   7771 	t->tcpip_tucs = htole32(tucs);
   7772 	t->tcpip_cmdlen = htole32(cmdlen);
   7773 	t->tcpip_seg = htole32(seg);
   7774 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7775 
   7776 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7777 	txs->txs_ndesc++;
   7778 }
   7779 
   7780 static inline int
   7781 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7782 {
   7783 	struct wm_softc *sc = ifp->if_softc;
   7784 	u_int cpuid = cpu_index(curcpu());
   7785 
   7786 	/*
    7787 	 * Currently, a simple distribution strategy.
    7788 	 * TODO:
    7789 	 * Distribute by flowid (RSS hash value).
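         	 *
         	 * Example (pure arithmetic of the return below): with ncpu = 8,
         	 * sc_nqueues = 4 and sc_affinity_offset = 0, a thread running
         	 * on CPU index 5 maps to queue (5 % 8) % 4 = 1.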
   7790 	 */
   7791 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7792 }
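
/*
 * Editor's sketch of the flowid-based distribution mentioned in the
 * TODO above (illustrative, not part of the driver).  It assumes the
 * caller has already extracted a 32-bit RSS-style flow hash from the
 * packet; the function name and parameter are hypothetical.
 */
#if 0
static inline int
wm_select_txqueue_by_flowid(struct wm_softc *sc, uint32_t flowid)
{

	/* The same flow always maps to the same Tx queue. */
	return flowid % sc->sc_nqueues;
}
#endif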
   7793 
   7794 static inline bool
   7795 wm_linkdown_discard(struct wm_txqueue *txq)
   7796 {
   7797 
   7798 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7799 		return true;
   7800 
   7801 	return false;
   7802 }
   7803 
   7804 /*
   7805  * wm_start:		[ifnet interface function]
   7806  *
   7807  *	Start packet transmission on the interface.
   7808  */
   7809 static void
   7810 wm_start(struct ifnet *ifp)
   7811 {
   7812 	struct wm_softc *sc = ifp->if_softc;
   7813 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7814 
   7815 #ifdef WM_MPSAFE
   7816 	KASSERT(if_is_mpsafe(ifp));
   7817 #endif
   7818 	/*
   7819 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7820 	 */
   7821 
   7822 	mutex_enter(txq->txq_lock);
   7823 	if (!txq->txq_stopping)
   7824 		wm_start_locked(ifp);
   7825 	mutex_exit(txq->txq_lock);
   7826 }
   7827 
   7828 static void
   7829 wm_start_locked(struct ifnet *ifp)
   7830 {
   7831 	struct wm_softc *sc = ifp->if_softc;
   7832 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7833 
   7834 	wm_send_common_locked(ifp, txq, false);
   7835 }
   7836 
   7837 static int
   7838 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7839 {
   7840 	int qid;
   7841 	struct wm_softc *sc = ifp->if_softc;
   7842 	struct wm_txqueue *txq;
   7843 
   7844 	qid = wm_select_txqueue(ifp, m);
   7845 	txq = &sc->sc_queue[qid].wmq_txq;
   7846 
   7847 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7848 		m_freem(m);
   7849 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7850 		return ENOBUFS;
   7851 	}
   7852 
   7853 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7854 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7855 	if (m->m_flags & M_MCAST)
   7856 		if_statinc_ref(nsr, if_omcasts);
   7857 	IF_STAT_PUTREF(ifp);
   7858 
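	/*
	 * A failed mutex_tryenter() here is harmless; see the comment in
	 * wm_nq_transmit() below for the two contention cases.
	 */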
   7859 	if (mutex_tryenter(txq->txq_lock)) {
   7860 		if (!txq->txq_stopping)
   7861 			wm_transmit_locked(ifp, txq);
   7862 		mutex_exit(txq->txq_lock);
   7863 	}
   7864 
   7865 	return 0;
   7866 }
   7867 
   7868 static void
   7869 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7870 {
   7871 
   7872 	wm_send_common_locked(ifp, txq, true);
   7873 }
   7874 
   7875 static void
   7876 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7877     bool is_transmit)
   7878 {
   7879 	struct wm_softc *sc = ifp->if_softc;
   7880 	struct mbuf *m0;
   7881 	struct wm_txsoft *txs;
   7882 	bus_dmamap_t dmamap;
   7883 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7884 	bus_addr_t curaddr;
   7885 	bus_size_t seglen, curlen;
   7886 	uint32_t cksumcmd;
   7887 	uint8_t cksumfields;
   7888 	bool remap = true;
   7889 
   7890 	KASSERT(mutex_owned(txq->txq_lock));
   7891 
   7892 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7893 		return;
   7894 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7895 		return;
   7896 
   7897 	if (__predict_false(wm_linkdown_discard(txq))) {
   7898 		do {
   7899 			if (is_transmit)
   7900 				m0 = pcq_get(txq->txq_interq);
   7901 			else
   7902 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   7903 			/*
    7904 			 * Increment the successful packet counter as if the
    7905 			 * packet had been discarded by the link-down PHY.
   7906 			 */
   7907 			if (m0 != NULL)
   7908 				if_statinc(ifp, if_opackets);
   7909 			m_freem(m0);
   7910 		} while (m0 != NULL);
   7911 		return;
   7912 	}
   7913 
   7914 	/* Remember the previous number of free descriptors. */
   7915 	ofree = txq->txq_free;
   7916 
   7917 	/*
   7918 	 * Loop through the send queue, setting up transmit descriptors
   7919 	 * until we drain the queue, or use up all available transmit
   7920 	 * descriptors.
   7921 	 */
   7922 	for (;;) {
   7923 		m0 = NULL;
   7924 
   7925 		/* Get a work queue entry. */
   7926 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7927 			wm_txeof(txq, UINT_MAX);
   7928 			if (txq->txq_sfree == 0) {
   7929 				DPRINTF(sc, WM_DEBUG_TX,
   7930 				    ("%s: TX: no free job descriptors\n",
   7931 					device_xname(sc->sc_dev)));
   7932 				WM_Q_EVCNT_INCR(txq, txsstall);
   7933 				break;
   7934 			}
   7935 		}
   7936 
   7937 		/* Grab a packet off the queue. */
   7938 		if (is_transmit)
   7939 			m0 = pcq_get(txq->txq_interq);
   7940 		else
   7941 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7942 		if (m0 == NULL)
   7943 			break;
   7944 
   7945 		DPRINTF(sc, WM_DEBUG_TX,
   7946 		    ("%s: TX: have packet to transmit: %p\n",
   7947 			device_xname(sc->sc_dev), m0));
   7948 
   7949 		txs = &txq->txq_soft[txq->txq_snext];
   7950 		dmamap = txs->txs_dmamap;
   7951 
   7952 		use_tso = (m0->m_pkthdr.csum_flags &
   7953 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7954 
   7955 		/*
   7956 		 * So says the Linux driver:
   7957 		 * The controller does a simple calculation to make sure
   7958 		 * there is enough room in the FIFO before initiating the
   7959 		 * DMA for each buffer. The calc is:
   7960 		 *	4 = ceil(buffer len / MSS)
   7961 		 * To make sure we don't overrun the FIFO, adjust the max
   7962 		 * buffer len if the MSS drops.
   7963 		 */
   7964 		dmamap->dm_maxsegsz =
   7965 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7966 		    ? m0->m_pkthdr.segsz << 2
   7967 		    : WTX_MAX_LEN;
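		/*
		 * Worked example (illustrative): with an MSS of m, the
		 * cap above is min(m << 2, WTX_MAX_LEN), i.e. at most
		 * four MSS-sized chunks per buffer, which keeps the
		 * FIFO calculation "4 = ceil(buffer len / MSS)" valid.
		 */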
   7968 
   7969 		/*
   7970 		 * Load the DMA map.  If this fails, the packet either
   7971 		 * didn't fit in the allotted number of segments, or we
   7972 		 * were short on resources.  For the too-many-segments
   7973 		 * case, we simply report an error and drop the packet,
   7974 		 * since we can't sanely copy a jumbo packet to a single
   7975 		 * buffer.
   7976 		 */
   7977 retry:
   7978 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7979 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7980 		if (__predict_false(error)) {
   7981 			if (error == EFBIG) {
   7982 				if (remap == true) {
   7983 					struct mbuf *m;
   7984 
   7985 					remap = false;
   7986 					m = m_defrag(m0, M_NOWAIT);
   7987 					if (m != NULL) {
   7988 						WM_Q_EVCNT_INCR(txq, defrag);
   7989 						m0 = m;
   7990 						goto retry;
   7991 					}
   7992 				}
   7993 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7994 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7995 				    "DMA segments, dropping...\n",
   7996 				    device_xname(sc->sc_dev));
   7997 				wm_dump_mbuf_chain(sc, m0);
   7998 				m_freem(m0);
   7999 				continue;
   8000 			}
   8001 			/* Short on resources, just stop for now. */
   8002 			DPRINTF(sc, WM_DEBUG_TX,
   8003 			    ("%s: TX: dmamap load failed: %d\n",
   8004 				device_xname(sc->sc_dev), error));
   8005 			break;
   8006 		}
   8007 
   8008 		segs_needed = dmamap->dm_nsegs;
   8009 		if (use_tso) {
   8010 			/* For sentinel descriptor; see below. */
   8011 			segs_needed++;
   8012 		}
   8013 
   8014 		/*
   8015 		 * Ensure we have enough descriptors free to describe
   8016 		 * the packet. Note, we always reserve one descriptor
   8017 		 * at the end of the ring due to the semantics of the
   8018 		 * TDT register, plus one more in the event we need
   8019 		 * to load offload context.
   8020 		 */
   8021 		if (segs_needed > txq->txq_free - 2) {
   8022 			/*
   8023 			 * Not enough free descriptors to transmit this
   8024 			 * packet.  We haven't committed anything yet,
   8025 			 * so just unload the DMA map, put the packet
    8026 			 * back on the queue, and punt.  Notify the upper
   8027 			 * layer that there are no more slots left.
   8028 			 */
   8029 			DPRINTF(sc, WM_DEBUG_TX,
   8030 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8031 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8032 				segs_needed, txq->txq_free - 1));
   8033 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8034 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8035 			WM_Q_EVCNT_INCR(txq, txdstall);
   8036 			break;
   8037 		}
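
		/*
		 * Example for the check above (illustrative): with
		 * txq_free == 10, a packet needing 9 segments is deferred,
		 * since one descriptor is reserved for the TDT semantics
		 * and one more for a possible offload context descriptor.
		 */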
   8038 
   8039 		/*
   8040 		 * Check for 82547 Tx FIFO bug. We need to do this
   8041 		 * once we know we can transmit the packet, since we
   8042 		 * do some internal FIFO space accounting here.
   8043 		 */
   8044 		if (sc->sc_type == WM_T_82547 &&
   8045 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8046 			DPRINTF(sc, WM_DEBUG_TX,
   8047 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8048 				device_xname(sc->sc_dev)));
   8049 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8050 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8051 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8052 			break;
   8053 		}
   8054 
   8055 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8056 
   8057 		DPRINTF(sc, WM_DEBUG_TX,
   8058 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8059 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8060 
   8061 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8062 
   8063 		/*
   8064 		 * Store a pointer to the packet so that we can free it
   8065 		 * later.
   8066 		 *
   8067 		 * Initially, we consider the number of descriptors the
   8068 		 * packet uses the number of DMA segments.  This may be
    8069 		 * packet uses to be the number of DMA segments.  This may
    8070 		 * be incremented by 1 if we do checksum offload (a descriptor
   8071 		 */
   8072 		txs->txs_mbuf = m0;
   8073 		txs->txs_firstdesc = txq->txq_next;
   8074 		txs->txs_ndesc = segs_needed;
   8075 
   8076 		/* Set up offload parameters for this packet. */
   8077 		if (m0->m_pkthdr.csum_flags &
   8078 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8079 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8080 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8081 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8082 		} else {
   8083 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8084 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8085 			cksumcmd = 0;
   8086 			cksumfields = 0;
   8087 		}
   8088 
   8089 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8090 
   8091 		/* Sync the DMA map. */
   8092 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8093 		    BUS_DMASYNC_PREWRITE);
   8094 
   8095 		/* Initialize the transmit descriptor. */
   8096 		for (nexttx = txq->txq_next, seg = 0;
   8097 		     seg < dmamap->dm_nsegs; seg++) {
   8098 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8099 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8100 			     seglen != 0;
   8101 			     curaddr += curlen, seglen -= curlen,
   8102 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8103 				curlen = seglen;
   8104 
   8105 				/*
   8106 				 * So says the Linux driver:
   8107 				 * Work around for premature descriptor
   8108 				 * write-backs in TSO mode.  Append a
   8109 				 * 4-byte sentinel descriptor.
   8110 				 */
   8111 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8112 				    curlen > 8)
   8113 					curlen -= 4;
   8114 
   8115 				wm_set_dma_addr(
   8116 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8117 				txq->txq_descs[nexttx].wtx_cmdlen
   8118 				    = htole32(cksumcmd | curlen);
   8119 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8120 				    = 0;
   8121 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8122 				    = cksumfields;
   8123 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8124 				lasttx = nexttx;
   8125 
   8126 				DPRINTF(sc, WM_DEBUG_TX,
   8127 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8128 					"len %#04zx\n",
   8129 					device_xname(sc->sc_dev), nexttx,
   8130 					(uint64_t)curaddr, curlen));
   8131 			}
   8132 		}
   8133 
   8134 		KASSERT(lasttx != -1);
   8135 
   8136 		/*
   8137 		 * Set up the command byte on the last descriptor of
   8138 		 * the packet. If we're in the interrupt delay window,
   8139 		 * delay the interrupt.
   8140 		 */
   8141 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8142 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8143 
   8144 		/*
   8145 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8146 		 * up the descriptor to encapsulate the packet for us.
   8147 		 *
   8148 		 * This is only valid on the last descriptor of the packet.
   8149 		 */
   8150 		if (vlan_has_tag(m0)) {
   8151 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8152 			    htole32(WTX_CMD_VLE);
   8153 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8154 			    = htole16(vlan_get_tag(m0));
   8155 		}
   8156 
   8157 		txs->txs_lastdesc = lasttx;
   8158 
   8159 		DPRINTF(sc, WM_DEBUG_TX,
   8160 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8161 			device_xname(sc->sc_dev),
   8162 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8163 
   8164 		/* Sync the descriptors we're using. */
   8165 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8166 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8167 
   8168 		/* Give the packet to the chip. */
   8169 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8170 
   8171 		DPRINTF(sc, WM_DEBUG_TX,
   8172 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8173 
   8174 		DPRINTF(sc, WM_DEBUG_TX,
   8175 		    ("%s: TX: finished transmitting packet, job %d\n",
   8176 			device_xname(sc->sc_dev), txq->txq_snext));
   8177 
   8178 		/* Advance the tx pointer. */
   8179 		txq->txq_free -= txs->txs_ndesc;
   8180 		txq->txq_next = nexttx;
   8181 
   8182 		txq->txq_sfree--;
   8183 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8184 
   8185 		/* Pass the packet to any BPF listeners. */
   8186 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8187 	}
   8188 
   8189 	if (m0 != NULL) {
   8190 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8191 		WM_Q_EVCNT_INCR(txq, descdrop);
   8192 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8193 			__func__));
   8194 		m_freem(m0);
   8195 	}
   8196 
   8197 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8198 		/* No more slots; notify upper layer. */
   8199 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8200 	}
   8201 
   8202 	if (txq->txq_free != ofree) {
   8203 		/* Set a watchdog timer in case the chip flakes out. */
   8204 		txq->txq_lastsent = time_uptime;
   8205 		txq->txq_sending = true;
   8206 	}
   8207 }
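
/*
 * Editor's sketch (illustrative, not part of the driver): the
 * WM_NEXTTX()/WM_NEXTTXS() ring advances used in the send path above
 * are assumed to be ordinary circular-index increments, equivalent to
 * the generic modulo form below:
 */
#if 0
static inline int
ring_next(int idx, int ring_size)
{

	/* Wrap around to slot 0 after the last slot. */
	return (idx + 1) % ring_size;
}
#endif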
   8208 
   8209 /*
   8210  * wm_nq_tx_offload:
   8211  *
   8212  *	Set up TCP/IP checksumming parameters for the
   8213  *	specified packet, for NEWQUEUE devices
   8214  */
   8215 static void
   8216 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8217     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8218 {
   8219 	struct mbuf *m0 = txs->txs_mbuf;
   8220 	uint32_t vl_len, mssidx, cmdc;
   8221 	struct ether_header *eh;
   8222 	int offset, iphl;
   8223 
   8224 	/*
   8225 	 * XXX It would be nice if the mbuf pkthdr had offset
   8226 	 * fields for the protocol headers.
   8227 	 */
   8228 	*cmdlenp = 0;
   8229 	*fieldsp = 0;
   8230 
   8231 	eh = mtod(m0, struct ether_header *);
   8232 	switch (htons(eh->ether_type)) {
   8233 	case ETHERTYPE_IP:
   8234 	case ETHERTYPE_IPV6:
   8235 		offset = ETHER_HDR_LEN;
   8236 		break;
   8237 
   8238 	case ETHERTYPE_VLAN:
   8239 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8240 		break;
   8241 
   8242 	default:
   8243 		/* Don't support this protocol or encapsulation. */
   8244 		*do_csum = false;
   8245 		return;
   8246 	}
   8247 	*do_csum = true;
   8248 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8249 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8250 
   8251 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8252 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8253 
   8254 	if ((m0->m_pkthdr.csum_flags &
   8255 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8256 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8257 	} else {
   8258 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8259 	}
   8260 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8261 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8262 
   8263 	if (vlan_has_tag(m0)) {
   8264 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8265 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8266 		*cmdlenp |= NQTX_CMD_VLE;
   8267 	}
   8268 
   8269 	mssidx = 0;
   8270 
   8271 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8272 		int hlen = offset + iphl;
   8273 		int tcp_hlen;
   8274 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8275 
   8276 		if (__predict_false(m0->m_len <
   8277 				    (hlen + sizeof(struct tcphdr)))) {
   8278 			/*
   8279 			 * TCP/IP headers are not in the first mbuf; we need
   8280 			 * to do this the slow and painful way. Let's just
   8281 			 * hope this doesn't happen very often.
   8282 			 */
   8283 			struct tcphdr th;
   8284 
   8285 			WM_Q_EVCNT_INCR(txq, tsopain);
   8286 
   8287 			m_copydata(m0, hlen, sizeof(th), &th);
   8288 			if (v4) {
   8289 				struct ip ip;
   8290 
   8291 				m_copydata(m0, offset, sizeof(ip), &ip);
   8292 				ip.ip_len = 0;
   8293 				m_copyback(m0,
   8294 				    offset + offsetof(struct ip, ip_len),
   8295 				    sizeof(ip.ip_len), &ip.ip_len);
   8296 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8297 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8298 			} else {
   8299 				struct ip6_hdr ip6;
   8300 
   8301 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8302 				ip6.ip6_plen = 0;
   8303 				m_copyback(m0,
   8304 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8305 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8306 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8307 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8308 			}
   8309 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8310 			    sizeof(th.th_sum), &th.th_sum);
   8311 
   8312 			tcp_hlen = th.th_off << 2;
   8313 		} else {
   8314 			/*
   8315 			 * TCP/IP headers are in the first mbuf; we can do
   8316 			 * this the easy way.
   8317 			 */
   8318 			struct tcphdr *th;
   8319 
   8320 			if (v4) {
   8321 				struct ip *ip =
   8322 				    (void *)(mtod(m0, char *) + offset);
   8323 				th = (void *)(mtod(m0, char *) + hlen);
   8324 
   8325 				ip->ip_len = 0;
   8326 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8327 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8328 			} else {
   8329 				struct ip6_hdr *ip6 =
   8330 				    (void *)(mtod(m0, char *) + offset);
   8331 				th = (void *)(mtod(m0, char *) + hlen);
   8332 
   8333 				ip6->ip6_plen = 0;
   8334 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8335 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8336 			}
   8337 			tcp_hlen = th->th_off << 2;
   8338 		}
   8339 		hlen += tcp_hlen;
   8340 		*cmdlenp |= NQTX_CMD_TSE;
   8341 
   8342 		if (v4) {
   8343 			WM_Q_EVCNT_INCR(txq, tso);
   8344 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8345 		} else {
   8346 			WM_Q_EVCNT_INCR(txq, tso6);
   8347 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8348 		}
   8349 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8350 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8351 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8352 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8353 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8354 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8355 	} else {
   8356 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8357 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8358 	}
   8359 
   8360 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8361 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8362 		cmdc |= NQTXC_CMD_IP4;
   8363 	}
   8364 
   8365 	if (m0->m_pkthdr.csum_flags &
   8366 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8367 		WM_Q_EVCNT_INCR(txq, tusum);
   8368 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8369 			cmdc |= NQTXC_CMD_TCP;
   8370 		else
   8371 			cmdc |= NQTXC_CMD_UDP;
   8372 
   8373 		cmdc |= NQTXC_CMD_IP4;
   8374 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8375 	}
   8376 	if (m0->m_pkthdr.csum_flags &
   8377 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8378 		WM_Q_EVCNT_INCR(txq, tusum6);
   8379 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8380 			cmdc |= NQTXC_CMD_TCP;
   8381 		else
   8382 			cmdc |= NQTXC_CMD_UDP;
   8383 
   8384 		cmdc |= NQTXC_CMD_IP6;
   8385 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8386 	}
   8387 
   8388 	/*
    8389 	 * We don't have to write a context descriptor for every packet on
    8390 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    8391 	 * I354, I210 and I211.  For these controllers it is enough to
    8392 	 * write one context descriptor per Tx queue.
    8393 	 * Writing a context descriptor for every packet adds overhead,
    8394 	 * but it does not cause problems.
   8395 	 */
   8396 	/* Fill in the context descriptor. */
   8397 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8398 	    htole32(vl_len);
   8399 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8400 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8401 	    htole32(cmdc);
   8402 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8403 	    htole32(mssidx);
   8404 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8405 	DPRINTF(sc, WM_DEBUG_TX,
   8406 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8407 		txq->txq_next, 0, vl_len));
   8408 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8409 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8410 	txs->txs_ndesc++;
   8411 }
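
/*
 * Editor's sketch (illustrative, not part of the driver): a by-hand
 * equivalent of the IPv4 pseudo-header fold that in_cksum_phdr() is
 * used for above.  For TSO the length field is taken as zero, and the
 * (non-inverted) one's-complement sum seeds th_sum so the hardware
 * only has to add the TCP header and payload.  The function name is
 * hypothetical; inputs are in network byte order as in the driver.
 */
#if 0
static uint16_t
example_tso_phdr_sum(uint32_t src, uint32_t dst, uint16_t proto)
{
	uint32_t sum;

	sum = (src >> 16) + (src & 0xffff)
	    + (dst >> 16) + (dst & 0xffff)
	    + proto;			/* length is zero for TSO */
	while (sum > 0xffff)		/* fold the carries */
		sum = (sum >> 16) + (sum & 0xffff);
	return (uint16_t)sum;
}
#endif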
   8412 
   8413 /*
   8414  * wm_nq_start:		[ifnet interface function]
   8415  *
   8416  *	Start packet transmission on the interface for NEWQUEUE devices
   8417  */
   8418 static void
   8419 wm_nq_start(struct ifnet *ifp)
   8420 {
   8421 	struct wm_softc *sc = ifp->if_softc;
   8422 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8423 
   8424 #ifdef WM_MPSAFE
   8425 	KASSERT(if_is_mpsafe(ifp));
   8426 #endif
   8427 	/*
   8428 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8429 	 */
   8430 
   8431 	mutex_enter(txq->txq_lock);
   8432 	if (!txq->txq_stopping)
   8433 		wm_nq_start_locked(ifp);
   8434 	mutex_exit(txq->txq_lock);
   8435 }
   8436 
   8437 static void
   8438 wm_nq_start_locked(struct ifnet *ifp)
   8439 {
   8440 	struct wm_softc *sc = ifp->if_softc;
   8441 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8442 
   8443 	wm_nq_send_common_locked(ifp, txq, false);
   8444 }
   8445 
   8446 static int
   8447 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8448 {
   8449 	int qid;
   8450 	struct wm_softc *sc = ifp->if_softc;
   8451 	struct wm_txqueue *txq;
   8452 
   8453 	qid = wm_select_txqueue(ifp, m);
   8454 	txq = &sc->sc_queue[qid].wmq_txq;
   8455 
   8456 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8457 		m_freem(m);
   8458 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8459 		return ENOBUFS;
   8460 	}
   8461 
   8462 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8463 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8464 	if (m->m_flags & M_MCAST)
   8465 		if_statinc_ref(nsr, if_omcasts);
   8466 	IF_STAT_PUTREF(ifp);
   8467 
   8468 	/*
    8469 	 * There are two situations in which this mutex_tryenter() can
    8470 	 * fail at run time:
    8471 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8472 	 *     (2) contention with the deferred if_start softint
    8473 	 *	   (wm_handle_queue())
    8474 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8475 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8476 	 * In case (2), the last packet is likewise dequeued by
    8477 	 * wm_deferred_start_locked(), so it does not get stuck either.
   8478 	 */
   8479 	if (mutex_tryenter(txq->txq_lock)) {
   8480 		if (!txq->txq_stopping)
   8481 			wm_nq_transmit_locked(ifp, txq);
   8482 		mutex_exit(txq->txq_lock);
   8483 	}
   8484 
   8485 	return 0;
   8486 }
   8487 
   8488 static void
   8489 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8490 {
   8491 
   8492 	wm_nq_send_common_locked(ifp, txq, true);
   8493 }
   8494 
   8495 static void
   8496 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8497     bool is_transmit)
   8498 {
   8499 	struct wm_softc *sc = ifp->if_softc;
   8500 	struct mbuf *m0;
   8501 	struct wm_txsoft *txs;
   8502 	bus_dmamap_t dmamap;
   8503 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8504 	bool do_csum, sent;
   8505 	bool remap = true;
   8506 
   8507 	KASSERT(mutex_owned(txq->txq_lock));
   8508 
   8509 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8510 		return;
   8511 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8512 		return;
   8513 
   8514 	if (__predict_false(wm_linkdown_discard(txq))) {
   8515 		do {
   8516 			if (is_transmit)
   8517 				m0 = pcq_get(txq->txq_interq);
   8518 			else
   8519 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8520 			/*
    8521 			 * Increment the successful packet counter as if the
    8522 			 * packet had been discarded by the link-down PHY.
   8523 			 */
   8524 			if (m0 != NULL)
   8525 				if_statinc(ifp, if_opackets);
   8526 			m_freem(m0);
   8527 		} while (m0 != NULL);
   8528 		return;
   8529 	}
   8530 
   8531 	sent = false;
   8532 
   8533 	/*
   8534 	 * Loop through the send queue, setting up transmit descriptors
   8535 	 * until we drain the queue, or use up all available transmit
   8536 	 * descriptors.
   8537 	 */
   8538 	for (;;) {
   8539 		m0 = NULL;
   8540 
   8541 		/* Get a work queue entry. */
   8542 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8543 			wm_txeof(txq, UINT_MAX);
   8544 			if (txq->txq_sfree == 0) {
   8545 				DPRINTF(sc, WM_DEBUG_TX,
   8546 				    ("%s: TX: no free job descriptors\n",
   8547 					device_xname(sc->sc_dev)));
   8548 				WM_Q_EVCNT_INCR(txq, txsstall);
   8549 				break;
   8550 			}
   8551 		}
   8552 
   8553 		/* Grab a packet off the queue. */
   8554 		if (is_transmit)
   8555 			m0 = pcq_get(txq->txq_interq);
   8556 		else
   8557 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8558 		if (m0 == NULL)
   8559 			break;
   8560 
   8561 		DPRINTF(sc, WM_DEBUG_TX,
   8562 		    ("%s: TX: have packet to transmit: %p\n",
   8563 		    device_xname(sc->sc_dev), m0));
   8564 
   8565 		txs = &txq->txq_soft[txq->txq_snext];
   8566 		dmamap = txs->txs_dmamap;
   8567 
   8568 		/*
   8569 		 * Load the DMA map.  If this fails, the packet either
   8570 		 * didn't fit in the allotted number of segments, or we
   8571 		 * were short on resources.  For the too-many-segments
   8572 		 * case, we simply report an error and drop the packet,
   8573 		 * since we can't sanely copy a jumbo packet to a single
   8574 		 * buffer.
   8575 		 */
   8576 retry:
   8577 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8578 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8579 		if (__predict_false(error)) {
   8580 			if (error == EFBIG) {
   8581 				if (remap == true) {
   8582 					struct mbuf *m;
   8583 
   8584 					remap = false;
   8585 					m = m_defrag(m0, M_NOWAIT);
   8586 					if (m != NULL) {
   8587 						WM_Q_EVCNT_INCR(txq, defrag);
   8588 						m0 = m;
   8589 						goto retry;
   8590 					}
   8591 				}
   8592 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8593 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8594 				    "DMA segments, dropping...\n",
   8595 				    device_xname(sc->sc_dev));
   8596 				wm_dump_mbuf_chain(sc, m0);
   8597 				m_freem(m0);
   8598 				continue;
   8599 			}
   8600 			/* Short on resources, just stop for now. */
   8601 			DPRINTF(sc, WM_DEBUG_TX,
   8602 			    ("%s: TX: dmamap load failed: %d\n",
   8603 				device_xname(sc->sc_dev), error));
   8604 			break;
   8605 		}
   8606 
   8607 		segs_needed = dmamap->dm_nsegs;
   8608 
   8609 		/*
   8610 		 * Ensure we have enough descriptors free to describe
   8611 		 * the packet. Note, we always reserve one descriptor
   8612 		 * at the end of the ring due to the semantics of the
   8613 		 * TDT register, plus one more in the event we need
   8614 		 * to load offload context.
   8615 		 */
   8616 		if (segs_needed > txq->txq_free - 2) {
   8617 			/*
   8618 			 * Not enough free descriptors to transmit this
   8619 			 * packet.  We haven't committed anything yet,
   8620 			 * so just unload the DMA map, put the packet
    8621 			 * back on the queue, and punt.  Notify the upper
   8622 			 * layer that there are no more slots left.
   8623 			 */
   8624 			DPRINTF(sc, WM_DEBUG_TX,
   8625 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8626 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8627 				segs_needed, txq->txq_free - 1));
   8628 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8629 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8630 			WM_Q_EVCNT_INCR(txq, txdstall);
   8631 			break;
   8632 		}
   8633 
   8634 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8635 
   8636 		DPRINTF(sc, WM_DEBUG_TX,
   8637 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8638 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8639 
   8640 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8641 
   8642 		/*
   8643 		 * Store a pointer to the packet so that we can free it
   8644 		 * later.
   8645 		 *
   8646 		 * Initially, we consider the number of descriptors the
   8647 		 * packet uses the number of DMA segments.  This may be
    8648 		 * packet uses to be the number of DMA segments.  This may
    8649 		 * be incremented by 1 if we do checksum offload (a descriptor
   8650 		 */
   8651 		txs->txs_mbuf = m0;
   8652 		txs->txs_firstdesc = txq->txq_next;
   8653 		txs->txs_ndesc = segs_needed;
   8654 
   8655 		/* Set up offload parameters for this packet. */
   8656 		uint32_t cmdlen, fields, dcmdlen;
   8657 		if (m0->m_pkthdr.csum_flags &
   8658 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8659 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8660 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8661 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8662 			    &do_csum);
   8663 		} else {
   8664 			do_csum = false;
   8665 			cmdlen = 0;
   8666 			fields = 0;
   8667 		}
   8668 
   8669 		/* Sync the DMA map. */
   8670 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8671 		    BUS_DMASYNC_PREWRITE);
   8672 
   8673 		/* Initialize the first transmit descriptor. */
   8674 		nexttx = txq->txq_next;
   8675 		if (!do_csum) {
   8676 			/* Setup a legacy descriptor */
   8677 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8678 			    dmamap->dm_segs[0].ds_addr);
   8679 			txq->txq_descs[nexttx].wtx_cmdlen =
   8680 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8681 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8682 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8683 			if (vlan_has_tag(m0)) {
   8684 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8685 				    htole32(WTX_CMD_VLE);
   8686 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8687 				    htole16(vlan_get_tag(m0));
   8688 			} else
   8689 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8690 
   8691 			dcmdlen = 0;
   8692 		} else {
   8693 			/* Setup an advanced data descriptor */
   8694 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8695 			    htole64(dmamap->dm_segs[0].ds_addr);
   8696 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8697 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8698 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8699 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8700 			    htole32(fields);
   8701 			DPRINTF(sc, WM_DEBUG_TX,
   8702 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8703 				device_xname(sc->sc_dev), nexttx,
   8704 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8705 			DPRINTF(sc, WM_DEBUG_TX,
   8706 			    ("\t 0x%08x%08x\n", fields,
   8707 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8708 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8709 		}
   8710 
   8711 		lasttx = nexttx;
   8712 		nexttx = WM_NEXTTX(txq, nexttx);
   8713 		/*
    8714 		 * Fill in the next descriptors.  The legacy and advanced
    8715 		 * formats are the same from here on.
   8716 		 */
   8717 		for (seg = 1; seg < dmamap->dm_nsegs;
   8718 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8719 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8720 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8721 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8722 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8723 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8724 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8725 			lasttx = nexttx;
   8726 
   8727 			DPRINTF(sc, WM_DEBUG_TX,
   8728 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8729 				device_xname(sc->sc_dev), nexttx,
   8730 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8731 				dmamap->dm_segs[seg].ds_len));
   8732 		}
   8733 
   8734 		KASSERT(lasttx != -1);
   8735 
   8736 		/*
   8737 		 * Set up the command byte on the last descriptor of
   8738 		 * the packet. If we're in the interrupt delay window,
   8739 		 * delay the interrupt.
   8740 		 */
   8741 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8742 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8743 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8744 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8745 
   8746 		txs->txs_lastdesc = lasttx;
   8747 
   8748 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8749 		    device_xname(sc->sc_dev),
   8750 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8751 
   8752 		/* Sync the descriptors we're using. */
   8753 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8754 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8755 
   8756 		/* Give the packet to the chip. */
   8757 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8758 		sent = true;
   8759 
   8760 		DPRINTF(sc, WM_DEBUG_TX,
   8761 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8762 
   8763 		DPRINTF(sc, WM_DEBUG_TX,
   8764 		    ("%s: TX: finished transmitting packet, job %d\n",
   8765 			device_xname(sc->sc_dev), txq->txq_snext));
   8766 
   8767 		/* Advance the tx pointer. */
   8768 		txq->txq_free -= txs->txs_ndesc;
   8769 		txq->txq_next = nexttx;
   8770 
   8771 		txq->txq_sfree--;
   8772 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8773 
   8774 		/* Pass the packet to any BPF listeners. */
   8775 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8776 	}
   8777 
   8778 	if (m0 != NULL) {
   8779 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8780 		WM_Q_EVCNT_INCR(txq, descdrop);
   8781 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8782 			__func__));
   8783 		m_freem(m0);
   8784 	}
   8785 
   8786 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8787 		/* No more slots; notify upper layer. */
   8788 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8789 	}
   8790 
   8791 	if (sent) {
   8792 		/* Set a watchdog timer in case the chip flakes out. */
   8793 		txq->txq_lastsent = time_uptime;
   8794 		txq->txq_sending = true;
   8795 	}
   8796 }
   8797 
   8798 static void
   8799 wm_deferred_start_locked(struct wm_txqueue *txq)
   8800 {
   8801 	struct wm_softc *sc = txq->txq_sc;
   8802 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8803 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8804 	int qid = wmq->wmq_id;
   8805 
   8806 	KASSERT(mutex_owned(txq->txq_lock));
   8807 
   8808 	if (txq->txq_stopping) {
   8809 		mutex_exit(txq->txq_lock);
   8810 		return;
   8811 	}
   8812 
   8813 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8814 		/* XXX Needed for ALTQ or single-CPU systems. */
   8815 		if (qid == 0)
   8816 			wm_nq_start_locked(ifp);
   8817 		wm_nq_transmit_locked(ifp, txq);
   8818 	} else {
    8819 		/* XXX Needed for ALTQ or single-CPU systems. */
   8820 		if (qid == 0)
   8821 			wm_start_locked(ifp);
   8822 		wm_transmit_locked(ifp, txq);
   8823 	}
   8824 }
   8825 
   8826 /* Interrupt */
   8827 
   8828 /*
   8829  * wm_txeof:
   8830  *
   8831  *	Helper; handle transmit interrupts.
   8832  */
   8833 static bool
   8834 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8835 {
   8836 	struct wm_softc *sc = txq->txq_sc;
   8837 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8838 	struct wm_txsoft *txs;
   8839 	int count = 0;
   8840 	int i;
   8841 	uint8_t status;
   8842 	bool more = false;
   8843 
   8844 	KASSERT(mutex_owned(txq->txq_lock));
   8845 
   8846 	if (txq->txq_stopping)
   8847 		return false;
   8848 
   8849 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8850 
   8851 	/*
   8852 	 * Go through the Tx list and free mbufs for those
   8853 	 * frames which have been transmitted.
   8854 	 */
   8855 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8856 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8857 		if (limit-- == 0) {
   8858 			more = true;
   8859 			DPRINTF(sc, WM_DEBUG_TX,
   8860 			    ("%s: TX: loop limited, job %d is not processed\n",
   8861 				device_xname(sc->sc_dev), i));
   8862 			break;
   8863 		}
   8864 
   8865 		txs = &txq->txq_soft[i];
   8866 
   8867 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8868 			device_xname(sc->sc_dev), i));
   8869 
   8870 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8871 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8872 
   8873 		status =
   8874 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8875 		if ((status & WTX_ST_DD) == 0) {
   8876 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8877 			    BUS_DMASYNC_PREREAD);
   8878 			break;
   8879 		}
   8880 
   8881 		count++;
   8882 		DPRINTF(sc, WM_DEBUG_TX,
   8883 		    ("%s: TX: job %d done: descs %d..%d\n",
   8884 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8885 		    txs->txs_lastdesc));
   8886 
   8887 		/*
   8888 		 * XXX We should probably be using the statistics
   8889 		 * XXX registers, but I don't know if they exist
   8890 		 * XXX on chips before the i82544.
   8891 		 */
   8892 
   8893 #ifdef WM_EVENT_COUNTERS
   8894 		if (status & WTX_ST_TU)
   8895 			WM_Q_EVCNT_INCR(txq, underrun);
   8896 #endif /* WM_EVENT_COUNTERS */
   8897 
   8898 		/*
    8899 		 * Documents for the 82574 and newer say the status field has
    8900 		 * neither the EC (Excessive Collision) bit nor the LC (Late
    8901 		 * Collision) bit (both are reserved).  Refer to the "PCIe GbE
    8902 		 * Controller Open Source Software Developer's Manual" and the
    8903 		 * 82574 and newer datasheets.
    8904 		 *
    8905 		 * XXX I saw the LC bit set on an I218 even on full-duplex
    8906 		 * media, so the bit might mean something else (undocumented).
   8907 		 */
   8908 
   8909 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8910 		    && ((sc->sc_type < WM_T_82574)
   8911 			|| (sc->sc_type == WM_T_80003))) {
   8912 			if_statinc(ifp, if_oerrors);
   8913 			if (status & WTX_ST_LC)
   8914 				log(LOG_WARNING, "%s: late collision\n",
   8915 				    device_xname(sc->sc_dev));
   8916 			else if (status & WTX_ST_EC) {
   8917 				if_statadd(ifp, if_collisions,
   8918 				    TX_COLLISION_THRESHOLD + 1);
   8919 				log(LOG_WARNING, "%s: excessive collisions\n",
   8920 				    device_xname(sc->sc_dev));
   8921 			}
   8922 		} else
   8923 			if_statinc(ifp, if_opackets);
   8924 
   8925 		txq->txq_packets++;
   8926 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8927 
   8928 		txq->txq_free += txs->txs_ndesc;
   8929 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8930 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8931 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8932 		m_freem(txs->txs_mbuf);
   8933 		txs->txs_mbuf = NULL;
   8934 	}
   8935 
   8936 	/* Update the dirty transmit buffer pointer. */
   8937 	txq->txq_sdirty = i;
   8938 	DPRINTF(sc, WM_DEBUG_TX,
   8939 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8940 
   8941 	if (count != 0)
   8942 		rnd_add_uint32(&sc->rnd_source, count);
   8943 
   8944 	/*
   8945 	 * If there are no more pending transmissions, cancel the watchdog
   8946 	 * timer.
   8947 	 */
   8948 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8949 		txq->txq_sending = false;
   8950 
   8951 	return more;
   8952 }
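
/*
 * Editor's sketch (illustrative, not part of the driver): wm_txeof()'s
 * "limit" argument and "more" return value implement bounded batching.
 * A caller shaped like the following (the softint cookie is
 * hypothetical) would re-schedule itself while work remains instead of
 * monopolizing the CPU under load:
 */
#if 0
static void
example_tx_intr(struct wm_txqueue *txq, void *softint_cookie)
{
	bool more;

	/* txq->txq_lock is assumed held, as wm_txeof() requires. */
	more = wm_txeof(txq, 100);	/* process at most 100 jobs */
	if (more)
		softint_schedule(softint_cookie);	/* finish later */
}
#endif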
   8953 
   8954 static inline uint32_t
   8955 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8956 {
   8957 	struct wm_softc *sc = rxq->rxq_sc;
   8958 
   8959 	if (sc->sc_type == WM_T_82574)
   8960 		return EXTRXC_STATUS(
   8961 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8962 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8963 		return NQRXC_STATUS(
   8964 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8965 	else
   8966 		return rxq->rxq_descs[idx].wrx_status;
   8967 }
   8968 
   8969 static inline uint32_t
   8970 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8971 {
   8972 	struct wm_softc *sc = rxq->rxq_sc;
   8973 
   8974 	if (sc->sc_type == WM_T_82574)
   8975 		return EXTRXC_ERROR(
   8976 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8977 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8978 		return NQRXC_ERROR(
   8979 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8980 	else
   8981 		return rxq->rxq_descs[idx].wrx_errors;
   8982 }
   8983 
   8984 static inline uint16_t
   8985 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8986 {
   8987 	struct wm_softc *sc = rxq->rxq_sc;
   8988 
   8989 	if (sc->sc_type == WM_T_82574)
   8990 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8991 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8992 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8993 	else
   8994 		return rxq->rxq_descs[idx].wrx_special;
   8995 }
   8996 
   8997 static inline int
   8998 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8999 {
   9000 	struct wm_softc *sc = rxq->rxq_sc;
   9001 
   9002 	if (sc->sc_type == WM_T_82574)
   9003 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9004 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9005 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9006 	else
   9007 		return rxq->rxq_descs[idx].wrx_len;
   9008 }
   9009 
   9010 #ifdef WM_DEBUG
   9011 static inline uint32_t
   9012 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9013 {
   9014 	struct wm_softc *sc = rxq->rxq_sc;
   9015 
   9016 	if (sc->sc_type == WM_T_82574)
   9017 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9018 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9019 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9020 	else
   9021 		return 0;
   9022 }
   9023 
   9024 static inline uint8_t
   9025 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9026 {
   9027 	struct wm_softc *sc = rxq->rxq_sc;
   9028 
   9029 	if (sc->sc_type == WM_T_82574)
   9030 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9031 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9032 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9033 	else
   9034 		return 0;
   9035 }
   9036 #endif /* WM_DEBUG */
   9037 
   9038 static inline bool
   9039 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9040     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9041 {
   9042 
   9043 	if (sc->sc_type == WM_T_82574)
   9044 		return (status & ext_bit) != 0;
   9045 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9046 		return (status & nq_bit) != 0;
   9047 	else
   9048 		return (status & legacy_bit) != 0;
   9049 }
   9050 
   9051 static inline bool
   9052 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9053     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9054 {
   9055 
   9056 	if (sc->sc_type == WM_T_82574)
   9057 		return (error & ext_bit) != 0;
   9058 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9059 		return (error & nq_bit) != 0;
   9060 	else
   9061 		return (error & legacy_bit) != 0;
   9062 }
   9063 
   9064 static inline bool
   9065 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9066 {
   9067 
   9068 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9069 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9070 		return true;
   9071 	else
   9072 		return false;
   9073 }
   9074 
   9075 static inline bool
   9076 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9077 {
   9078 	struct wm_softc *sc = rxq->rxq_sc;
   9079 
   9080 	/* XXX missing error bit for newqueue? */
   9081 	if (wm_rxdesc_is_set_error(sc, errors,
   9082 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9083 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9084 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9085 		NQRXC_ERROR_RXE)) {
   9086 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9087 		    EXTRXC_ERROR_SE, 0))
   9088 			log(LOG_WARNING, "%s: symbol error\n",
   9089 			    device_xname(sc->sc_dev));
   9090 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9091 		    EXTRXC_ERROR_SEQ, 0))
   9092 			log(LOG_WARNING, "%s: receive sequence error\n",
   9093 			    device_xname(sc->sc_dev));
   9094 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9095 		    EXTRXC_ERROR_CE, 0))
   9096 			log(LOG_WARNING, "%s: CRC error\n",
   9097 			    device_xname(sc->sc_dev));
   9098 		return true;
   9099 	}
   9100 
   9101 	return false;
   9102 }
   9103 
   9104 static inline bool
   9105 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9106 {
   9107 	struct wm_softc *sc = rxq->rxq_sc;
   9108 
   9109 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9110 		NQRXC_STATUS_DD)) {
   9111 		/* We have processed all of the receive descriptors. */
   9112 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9113 		return false;
   9114 	}
   9115 
   9116 	return true;
   9117 }
   9118 
   9119 static inline bool
   9120 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9121     uint16_t vlantag, struct mbuf *m)
   9122 {
   9123 
   9124 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9125 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9126 		vlan_set_tag(m, le16toh(vlantag));
   9127 	}
   9128 
   9129 	return true;
   9130 }
   9131 
   9132 static inline void
   9133 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9134     uint32_t errors, struct mbuf *m)
   9135 {
   9136 	struct wm_softc *sc = rxq->rxq_sc;
   9137 
   9138 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9139 		if (wm_rxdesc_is_set_status(sc, status,
   9140 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9141 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9142 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9143 			if (wm_rxdesc_is_set_error(sc, errors,
   9144 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9145 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9146 		}
   9147 		if (wm_rxdesc_is_set_status(sc, status,
   9148 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9149 			/*
   9150 			 * Note: we don't know if this was TCP or UDP,
   9151 			 * so we just set both bits, and expect the
   9152 			 * upper layers to deal.
   9153 			 */
   9154 			WM_Q_EVCNT_INCR(rxq, tusum);
   9155 			m->m_pkthdr.csum_flags |=
   9156 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9157 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9158 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9159 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9160 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9161 		}
   9162 	}
   9163 }
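
/*
 * Editor's sketch (illustrative, not part of the driver): how an upper
 * layer typically consumes the csum_flags set above.  The real checks
 * live in the IP/TCP/UDP input paths; the function name is
 * hypothetical.
 */
#if 0
static bool
example_rx_csum_ok(const struct mbuf *m)
{
	const int f = m->m_pkthdr.csum_flags;

	if ((f & M_CSUM_IPv4) != 0 && (f & M_CSUM_IPv4_BAD) != 0)
		return false;	/* hardware saw a bad IPv4 header sum */
	if ((f & (M_CSUM_TCPv4 | M_CSUM_UDPv4 |
	    M_CSUM_TCPv6 | M_CSUM_UDPv6)) != 0 &&
	    (f & M_CSUM_TCP_UDP_BAD) != 0)
		return false;	/* hardware saw a bad TCP/UDP checksum */
	return true;
}
#endif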
   9164 
   9165 /*
   9166  * wm_rxeof:
   9167  *
   9168  *	Helper; handle receive interrupts.
   9169  */
   9170 static bool
   9171 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9172 {
   9173 	struct wm_softc *sc = rxq->rxq_sc;
   9174 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9175 	struct wm_rxsoft *rxs;
   9176 	struct mbuf *m;
   9177 	int i, len;
   9178 	int count = 0;
   9179 	uint32_t status, errors;
   9180 	uint16_t vlantag;
   9181 	bool more = false;
   9182 
   9183 	KASSERT(mutex_owned(rxq->rxq_lock));
   9184 
   9185 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9186 		if (limit-- == 0) {
   9187 			more = true;
   9188 			DPRINTF(sc, WM_DEBUG_RX,
   9189 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9190 				device_xname(sc->sc_dev), i));
   9191 			break;
   9192 		}
   9193 
   9194 		rxs = &rxq->rxq_soft[i];
   9195 
   9196 		DPRINTF(sc, WM_DEBUG_RX,
   9197 		    ("%s: RX: checking descriptor %d\n",
   9198 			device_xname(sc->sc_dev), i));
   9199 		wm_cdrxsync(rxq, i,
   9200 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9201 
   9202 		status = wm_rxdesc_get_status(rxq, i);
   9203 		errors = wm_rxdesc_get_errors(rxq, i);
   9204 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9205 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9206 #ifdef WM_DEBUG
   9207 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9208 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9209 #endif
   9210 
   9211 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9212 			break;
   9213 		}
   9214 
   9215 		count++;
   9216 		if (__predict_false(rxq->rxq_discard)) {
   9217 			DPRINTF(sc, WM_DEBUG_RX,
   9218 			    ("%s: RX: discarding contents of descriptor %d\n",
   9219 				device_xname(sc->sc_dev), i));
   9220 			wm_init_rxdesc(rxq, i);
   9221 			if (wm_rxdesc_is_eop(rxq, status)) {
   9222 				/* Reset our state. */
   9223 				DPRINTF(sc, WM_DEBUG_RX,
   9224 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9225 					device_xname(sc->sc_dev)));
   9226 				rxq->rxq_discard = 0;
   9227 			}
   9228 			continue;
   9229 		}
   9230 
   9231 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9232 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9233 
   9234 		m = rxs->rxs_mbuf;
   9235 
   9236 		/*
   9237 		 * Add a new receive buffer to the ring, unless of
   9238 		 * course the length is zero. Treat the latter as a
   9239 		 * failed mapping.
   9240 		 */
   9241 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9242 			/*
   9243 			 * Failed, throw away what we've done so
   9244 			 * far, and discard the rest of the packet.
   9245 			 */
   9246 			if_statinc(ifp, if_ierrors);
   9247 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9248 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9249 			wm_init_rxdesc(rxq, i);
   9250 			if (!wm_rxdesc_is_eop(rxq, status))
   9251 				rxq->rxq_discard = 1;
   9252 			if (rxq->rxq_head != NULL)
   9253 				m_freem(rxq->rxq_head);
   9254 			WM_RXCHAIN_RESET(rxq);
   9255 			DPRINTF(sc, WM_DEBUG_RX,
   9256 			    ("%s: RX: Rx buffer allocation failed, "
   9257 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9258 				rxq->rxq_discard ? " (discard)" : ""));
   9259 			continue;
   9260 		}
   9261 
   9262 		m->m_len = len;
   9263 		rxq->rxq_len += len;
   9264 		DPRINTF(sc, WM_DEBUG_RX,
   9265 		    ("%s: RX: buffer at %p len %d\n",
   9266 			device_xname(sc->sc_dev), m->m_data, len));
   9267 
   9268 		/* If this is not the end of the packet, keep looking. */
   9269 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9270 			WM_RXCHAIN_LINK(rxq, m);
   9271 			DPRINTF(sc, WM_DEBUG_RX,
   9272 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9273 				device_xname(sc->sc_dev), rxq->rxq_len));
   9274 			continue;
   9275 		}
   9276 
   9277 		/*
    9278 		 * Okay, we have the entire packet now.  The chip is
    9279 		 * configured to include the FCS except on the I35[04] and
    9280 		 * I21[01] (not all chips can be configured to strip it), so
    9281 		 * we need to trim it.  Those chips have an erratum: the
    9282 		 * RCTL_SECRC bit in the RCTL register is always set, so we
    9283 		 * don't trim the FCS on them.  PCH2 and newer chips also
    9284 		 * omit the FCS when jumbo frames are used, to work around
    9285 		 * an erratum.  We may need to adjust the length of the
    9286 		 * previous mbuf in the chain if the current mbuf is too short.
   9287 		 */
   9288 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9289 			if (m->m_len < ETHER_CRC_LEN) {
   9290 				rxq->rxq_tail->m_len
   9291 				    -= (ETHER_CRC_LEN - m->m_len);
   9292 				m->m_len = 0;
   9293 			} else
   9294 				m->m_len -= ETHER_CRC_LEN;
   9295 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9296 		} else
   9297 			len = rxq->rxq_len;
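
		/*
		 * Worked example for the trimming above (illustrative): if
		 * the 4-byte FCS is split so that the final mbuf holds only
		 * 2 of its bytes, the other 2 sit at the end of the previous
		 * mbuf, so that mbuf is shortened by 2 and m->m_len becomes 0.
		 */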
   9298 
   9299 		WM_RXCHAIN_LINK(rxq, m);
   9300 
   9301 		*rxq->rxq_tailp = NULL;
   9302 		m = rxq->rxq_head;
   9303 
   9304 		WM_RXCHAIN_RESET(rxq);
   9305 
   9306 		DPRINTF(sc, WM_DEBUG_RX,
   9307 		    ("%s: RX: have entire packet, len -> %d\n",
   9308 			device_xname(sc->sc_dev), len));
   9309 
   9310 		/* If an error occurred, update stats and drop the packet. */
   9311 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9312 			m_freem(m);
   9313 			continue;
   9314 		}
   9315 
   9316 		/* No errors.  Receive the packet. */
   9317 		m_set_rcvif(m, ifp);
   9318 		m->m_pkthdr.len = len;
   9319 		/*
    9320 		 * TODO:
    9321 		 * We should save the RSS hash and RSS type in this mbuf.
   9322 		 */
   9323 		DPRINTF(sc, WM_DEBUG_RX,
   9324 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9325 			device_xname(sc->sc_dev), rsstype, rsshash));
   9326 
   9327 		/*
   9328 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9329 		 * for us.  Associate the tag with the packet.
   9330 		 */
   9331 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9332 			continue;
   9333 
   9334 		/* Set up checksum info for this packet. */
   9335 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9336 
   9337 		rxq->rxq_packets++;
   9338 		rxq->rxq_bytes += len;
   9339 		/* Pass it on. */
   9340 		if_percpuq_enqueue(sc->sc_ipq, m);
   9341 
   9342 		if (rxq->rxq_stopping)
   9343 			break;
   9344 	}
   9345 	rxq->rxq_ptr = i;
   9346 
   9347 	if (count != 0)
   9348 		rnd_add_uint32(&sc->rnd_source, count);
   9349 
   9350 	DPRINTF(sc, WM_DEBUG_RX,
   9351 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9352 
   9353 	return more;
   9354 }
   9355 
   9356 /*
   9357  * wm_linkintr_gmii:
   9358  *
   9359  *	Helper; handle link interrupts for GMII.
   9360  */
   9361 static void
   9362 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9363 {
   9364 	device_t dev = sc->sc_dev;
   9365 	uint32_t status, reg;
   9366 	bool link;
   9367 	int rv;
   9368 
   9369 	KASSERT(WM_CORE_LOCKED(sc));
   9370 
   9371 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9372 		__func__));
   9373 
   9374 	if ((icr & ICR_LSC) == 0) {
   9375 		if (icr & ICR_RXSEQ)
   9376 			DPRINTF(sc, WM_DEBUG_LINK,
   9377 			    ("%s: LINK Receive sequence error\n",
   9378 				device_xname(dev)));
   9379 		return;
   9380 	}
   9381 
   9382 	/* Link status changed */
   9383 	status = CSR_READ(sc, WMREG_STATUS);
   9384 	link = status & STATUS_LU;
   9385 	if (link) {
   9386 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9387 			device_xname(dev),
   9388 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9389 		if (wm_phy_need_linkdown_discard(sc))
   9390 			wm_clear_linkdown_discard(sc);
   9391 	} else {
   9392 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9393 			device_xname(dev)));
   9394 		if (wm_phy_need_linkdown_discard(sc))
   9395 			wm_set_linkdown_discard(sc);
   9396 	}
   9397 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9398 		wm_gig_downshift_workaround_ich8lan(sc);
   9399 
   9400 	if ((sc->sc_type == WM_T_ICH8)
   9401 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9402 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9403 	}
   9404 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9405 		device_xname(dev)));
   9406 	mii_pollstat(&sc->sc_mii);
   9407 	if (sc->sc_type == WM_T_82543) {
   9408 		int miistatus, active;
   9409 
   9410 		/*
   9411 		 * With 82543, we need to force speed and
   9412 		 * duplex on the MAC equal to what the PHY
   9413 		 * speed and duplex configuration is.
   9414 		 */
   9415 		miistatus = sc->sc_mii.mii_media_status;
   9416 
   9417 		if (miistatus & IFM_ACTIVE) {
   9418 			active = sc->sc_mii.mii_media_active;
   9419 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9420 			switch (IFM_SUBTYPE(active)) {
   9421 			case IFM_10_T:
   9422 				sc->sc_ctrl |= CTRL_SPEED_10;
   9423 				break;
   9424 			case IFM_100_TX:
   9425 				sc->sc_ctrl |= CTRL_SPEED_100;
   9426 				break;
   9427 			case IFM_1000_T:
   9428 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9429 				break;
   9430 			default:
   9431 				/*
   9432 				 * Fiber?
   9433 				 * Should not reach here.
   9434 				 */
   9435 				device_printf(dev, "unknown media (%x)\n",
   9436 				    active);
   9437 				break;
   9438 			}
   9439 			if (active & IFM_FDX)
   9440 				sc->sc_ctrl |= CTRL_FD;
   9441 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9442 		}
   9443 	} else if (sc->sc_type == WM_T_PCH) {
   9444 		wm_k1_gig_workaround_hv(sc,
   9445 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9446 	}
   9447 
   9448 	/*
   9449 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9450 	 * aggressive, resulting in many collisions. To avoid this, increase
   9451 	 * the IPG and reduce Rx latency in the PHY.
   9452 	 */
   9453 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9454 	    && link) {
   9455 		uint32_t tipg_reg;
   9456 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9457 		bool fdx;
   9458 		uint16_t emi_addr, emi_val;
   9459 
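        		/*
        		 * Note: the IPGT and EMI values below appear to follow
        		 * Intel's reference workaround code; emi_val selects the
        		 * Rx latency setting written to the PHY's RX_CONFIG EMI
        		 * register below (0 = reduced latency, 1 = default).
        		 */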
   9460 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9461 		tipg_reg &= ~TIPG_IPGT_MASK;
   9462 		fdx = status & STATUS_FD;
   9463 
   9464 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9465 			tipg_reg |= 0xff;
   9466 			/* Reduce Rx latency in analog PHY */
   9467 			emi_val = 0;
   9468 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9469 		    fdx && speed != STATUS_SPEED_1000) {
   9470 			tipg_reg |= 0xc;
   9471 			emi_val = 1;
   9472 		} else {
   9473 			/* Roll back the default values */
   9474 			tipg_reg |= 0x08;
   9475 			emi_val = 1;
   9476 		}
   9477 
   9478 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9479 
   9480 		rv = sc->phy.acquire(sc);
   9481 		if (rv)
   9482 			return;
   9483 
   9484 		if (sc->sc_type == WM_T_PCH2)
   9485 			emi_addr = I82579_RX_CONFIG;
   9486 		else
   9487 			emi_addr = I217_RX_CONFIG;
   9488 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9489 
   9490 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9491 			uint16_t phy_reg;
   9492 
   9493 			sc->phy.readreg_locked(dev, 2,
   9494 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9495 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9496 			if (speed == STATUS_SPEED_100
   9497 			    || speed == STATUS_SPEED_10)
   9498 				phy_reg |= 0x3e8;
   9499 			else
   9500 				phy_reg |= 0xfa;
   9501 			sc->phy.writereg_locked(dev, 2,
   9502 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9503 
   9504 			if (speed == STATUS_SPEED_1000) {
   9505 				sc->phy.readreg_locked(dev, 2,
   9506 				    HV_PM_CTRL, &phy_reg);
   9507 
   9508 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9509 
   9510 				sc->phy.writereg_locked(dev, 2,
   9511 				    HV_PM_CTRL, phy_reg);
   9512 			}
   9513 		}
   9514 		sc->phy.release(sc);
   9515 
   9516 		if (rv)
   9517 			return;
   9518 
   9519 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9520 			uint16_t data, ptr_gap;
   9521 
   9522 			if (speed == STATUS_SPEED_1000) {
   9523 				rv = sc->phy.acquire(sc);
   9524 				if (rv)
   9525 					return;
   9526 
   9527 				rv = sc->phy.readreg_locked(dev, 2,
   9528 				    I82579_UNKNOWN1, &data);
   9529 				if (rv) {
   9530 					sc->phy.release(sc);
   9531 					return;
   9532 				}
   9533 
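        				/*
        				 * Bits 11:2 of this undocumented
        				 * register hold a pointer gap value;
        				 * enforce a minimum of 0x18 at
        				 * gigabit speed.
        				 */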
   9534 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9535 				if (ptr_gap < 0x18) {
   9536 					data &= ~(0x3ff << 2);
   9537 					data |= (0x18 << 2);
   9538 					rv = sc->phy.writereg_locked(dev,
   9539 					    2, I82579_UNKNOWN1, data);
   9540 				}
   9541 				sc->phy.release(sc);
   9542 				if (rv)
   9543 					return;
   9544 			} else {
   9545 				rv = sc->phy.acquire(sc);
   9546 				if (rv)
   9547 					return;
   9548 
   9549 				rv = sc->phy.writereg_locked(dev, 2,
   9550 				    I82579_UNKNOWN1, 0xc023);
   9551 				sc->phy.release(sc);
   9552 				if (rv)
   9553 					return;
   9554 
   9555 			}
   9556 		}
   9557 	}
   9558 
   9559 	/*
   9560 	 * I217 packet loss issue:
   9561 	 * ensure that the FEXTNVM4 beacon duration is set correctly
   9562 	 * on power-up.
   9563 	 * Set the beacon duration for the I217 to 8 usec.
   9564 	 */
   9565 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9566 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9567 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9568 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9569 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9570 	}
   9571 
   9572 	/* Work-around I218 hang issue */
   9573 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9574 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9575 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9576 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9577 		wm_k1_workaround_lpt_lp(sc, link);
   9578 
   9579 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9580 		/*
   9581 		 * Set platform power management values for Latency
   9582 		 * Tolerance Reporting (LTR)
   9583 		 */
   9584 		wm_platform_pm_pch_lpt(sc,
   9585 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9586 	}
   9587 
   9588 	/* Clear link partner's EEE ability */
   9589 	sc->eee_lp_ability = 0;
   9590 
   9591 	/* FEXTNVM6 K1-off workaround */
   9592 	if (sc->sc_type == WM_T_PCH_SPT) {
   9593 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9594 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9595 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9596 		else
   9597 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9598 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9599 	}
   9600 
   9601 	if (!link)
   9602 		return;
   9603 
   9604 	switch (sc->sc_type) {
   9605 	case WM_T_PCH2:
   9606 		wm_k1_workaround_lv(sc);
   9607 		/* FALLTHROUGH */
   9608 	case WM_T_PCH:
   9609 		if (sc->sc_phytype == WMPHY_82578)
   9610 			wm_link_stall_workaround_hv(sc);
   9611 		break;
   9612 	default:
   9613 		break;
   9614 	}
   9615 
   9616 	/* Enable/Disable EEE after link up */
   9617 	if (sc->sc_phytype > WMPHY_82579)
   9618 		wm_set_eee_pchlan(sc);
   9619 }
   9620 
   9621 /*
   9622  * wm_linkintr_tbi:
   9623  *
   9624  *	Helper; handle link interrupts for TBI mode.
   9625  */
   9626 static void
   9627 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9628 {
   9629 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9630 	uint32_t status;
   9631 
   9632 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9633 		__func__));
   9634 
   9635 	status = CSR_READ(sc, WMREG_STATUS);
   9636 	if (icr & ICR_LSC) {
   9637 		wm_check_for_link(sc);
   9638 		if (status & STATUS_LU) {
   9639 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9640 				device_xname(sc->sc_dev),
   9641 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9642 			/*
   9643 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
   9644 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   9645 			 */
   9646 
   9647 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9648 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9649 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9650 			if (status & STATUS_FD)
   9651 				sc->sc_tctl |=
   9652 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9653 			else
   9654 				sc->sc_tctl |=
   9655 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9656 			if (sc->sc_ctrl & CTRL_TFCE)
   9657 				sc->sc_fcrtl |= FCRTL_XONE;
   9658 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9659 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9660 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9661 			sc->sc_tbi_linkup = 1;
   9662 			if_link_state_change(ifp, LINK_STATE_UP);
   9663 		} else {
   9664 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9665 				device_xname(sc->sc_dev)));
   9666 			sc->sc_tbi_linkup = 0;
   9667 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9668 		}
   9669 		/* Update LED */
   9670 		wm_tbi_serdes_set_linkled(sc);
   9671 	} else if (icr & ICR_RXSEQ)
   9672 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9673 			device_xname(sc->sc_dev)));
   9674 }
   9675 
   9676 /*
   9677  * wm_linkintr_serdes:
   9678  *
   9679  *	Helper; handle link interrupts for SERDES mode.
   9680  */
   9681 static void
   9682 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9683 {
   9684 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9685 	struct mii_data *mii = &sc->sc_mii;
   9686 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9687 	uint32_t pcs_adv, pcs_lpab, reg;
   9688 
   9689 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9690 		__func__));
   9691 
   9692 	if (icr & ICR_LSC) {
   9693 		/* Check PCS */
   9694 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9695 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9696 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9697 				device_xname(sc->sc_dev)));
   9698 			mii->mii_media_status |= IFM_ACTIVE;
   9699 			sc->sc_tbi_linkup = 1;
   9700 			if_link_state_change(ifp, LINK_STATE_UP);
   9701 		} else {
   9702 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9703 				device_xname(sc->sc_dev)));
   9704 			mii->mii_media_status |= IFM_NONE;
   9705 			sc->sc_tbi_linkup = 0;
   9706 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9707 			wm_tbi_serdes_set_linkled(sc);
   9708 			return;
   9709 		}
   9710 		mii->mii_media_active |= IFM_1000_SX;
   9711 		if ((reg & PCS_LSTS_FDX) != 0)
   9712 			mii->mii_media_active |= IFM_FDX;
   9713 		else
   9714 			mii->mii_media_active |= IFM_HDX;
   9715 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9716 			/* Check flow */
   9717 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9718 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9719 				DPRINTF(sc, WM_DEBUG_LINK,
   9720 				    ("XXX LINKOK but not ACOMP\n"));
   9721 				return;
   9722 			}
   9723 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9724 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9725 			DPRINTF(sc, WM_DEBUG_LINK,
   9726 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
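        			/*
        			 * Resolve flow control as in IEEE 802.3 Annex
        			 * 28B: symmetric pause when both link partners
        			 * advertise it, otherwise Tx-only or Rx-only
        			 * pause according to the asymmetric pause bits.
        			 */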
   9727 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9728 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9729 				mii->mii_media_active |= IFM_FLOW
   9730 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9731 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9732 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9733 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9734 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9735 				mii->mii_media_active |= IFM_FLOW
   9736 				    | IFM_ETH_TXPAUSE;
   9737 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9738 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9739 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9740 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9741 				mii->mii_media_active |= IFM_FLOW
   9742 				    | IFM_ETH_RXPAUSE;
   9743 		}
   9744 		/* Update LED */
   9745 		wm_tbi_serdes_set_linkled(sc);
   9746 	} else
   9747 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9748 		    device_xname(sc->sc_dev)));
   9749 }
   9750 
   9751 /*
   9752  * wm_linkintr:
   9753  *
   9754  *	Helper; handle link interrupts.
   9755  */
   9756 static void
   9757 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9758 {
   9759 
   9760 	KASSERT(WM_CORE_LOCKED(sc));
   9761 
   9762 	if (sc->sc_flags & WM_F_HAS_MII)
   9763 		wm_linkintr_gmii(sc, icr);
   9764 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9765 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9766 		wm_linkintr_serdes(sc, icr);
   9767 	else
   9768 		wm_linkintr_tbi(sc, icr);
   9769 }
   9770 
   9771 
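        /*
         * wm_sched_handle_queue:
         *
         *	Schedule deferred Tx/Rx processing for a queue, either on the
         *	per-device workqueue or as a softint, depending on
         *	wmq_txrx_use_workqueue (which callers refresh from
         *	sc_txrx_use_workqueue just before calling this).
         */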
   9772 static inline void
   9773 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9774 {
   9775 
   9776 	if (wmq->wmq_txrx_use_workqueue)
   9777 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9778 	else
   9779 		softint_schedule(wmq->wmq_si);
   9780 }
   9781 
   9782 /*
   9783  * wm_intr_legacy:
   9784  *
   9785  *	Interrupt service routine for INTx and MSI.
   9786  */
   9787 static int
   9788 wm_intr_legacy(void *arg)
   9789 {
   9790 	struct wm_softc *sc = arg;
   9791 	struct wm_queue *wmq = &sc->sc_queue[0];
   9792 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9793 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9794 	uint32_t icr, rndval = 0;
   9795 	int handled = 0;
   9796 
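        	/*
        	 * Reading ICR clears the asserted interrupt causes, so loop
        	 * until no cause that we requested (sc_icr) remains set.
        	 */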
   9797 	while (1 /* CONSTCOND */) {
   9798 		icr = CSR_READ(sc, WMREG_ICR);
   9799 		if ((icr & sc->sc_icr) == 0)
   9800 			break;
   9801 		if (handled == 0)
   9802 			DPRINTF(sc, WM_DEBUG_TX,
   9803 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9804 		if (rndval == 0)
   9805 			rndval = icr;
   9806 
   9807 		mutex_enter(rxq->rxq_lock);
   9808 
   9809 		if (rxq->rxq_stopping) {
   9810 			mutex_exit(rxq->rxq_lock);
   9811 			break;
   9812 		}
   9813 
   9814 		handled = 1;
   9815 
   9816 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9817 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9818 			DPRINTF(sc, WM_DEBUG_RX,
   9819 			    ("%s: RX: got Rx intr 0x%08x\n",
   9820 				device_xname(sc->sc_dev),
   9821 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9822 			WM_Q_EVCNT_INCR(rxq, intr);
   9823 		}
   9824 #endif
   9825 		/*
   9826 		 * wm_rxeof() does *not* call upper layer functions directly,
   9827 		 * as if_percpuq_enqueue() merely calls softint_schedule().
   9828 		 * So, wm_rxeof() can safely be called in interrupt context.
   9829 		 */
   9830 		wm_rxeof(rxq, UINT_MAX);
   9831 
   9832 		mutex_exit(rxq->rxq_lock);
   9833 		mutex_enter(txq->txq_lock);
   9834 
   9835 		if (txq->txq_stopping) {
   9836 			mutex_exit(txq->txq_lock);
   9837 			break;
   9838 		}
   9839 
   9840 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9841 		if (icr & ICR_TXDW) {
   9842 			DPRINTF(sc, WM_DEBUG_TX,
   9843 			    ("%s: TX: got TXDW interrupt\n",
   9844 				device_xname(sc->sc_dev)));
   9845 			WM_Q_EVCNT_INCR(txq, txdw);
   9846 		}
   9847 #endif
   9848 		wm_txeof(txq, UINT_MAX);
   9849 
   9850 		mutex_exit(txq->txq_lock);
   9851 		WM_CORE_LOCK(sc);
   9852 
   9853 		if (sc->sc_core_stopping) {
   9854 			WM_CORE_UNLOCK(sc);
   9855 			break;
   9856 		}
   9857 
   9858 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9859 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9860 			wm_linkintr(sc, icr);
   9861 		}
   9862 		if ((icr & ICR_GPI(0)) != 0)
   9863 			device_printf(sc->sc_dev, "got module interrupt\n");
   9864 
   9865 		WM_CORE_UNLOCK(sc);
   9866 
   9867 		if (icr & ICR_RXO) {
   9868 #if defined(WM_DEBUG)
   9869 			log(LOG_WARNING, "%s: Receive overrun\n",
   9870 			    device_xname(sc->sc_dev));
   9871 #endif /* defined(WM_DEBUG) */
   9872 		}
   9873 	}
   9874 
   9875 	rnd_add_uint32(&sc->rnd_source, rndval);
   9876 
   9877 	if (handled) {
   9878 		/* Try to get more packets going. */
   9879 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9880 		wm_sched_handle_queue(sc, wmq);
   9881 	}
   9882 
   9883 	return handled;
   9884 }
   9885 
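        /*
         * wm_txrxintr_disable:
         *
         *	Mask the Tx/Rx interrupts for one queue: via IMC queue bits on
         *	the 82574, via EIMC queue bits on the 82575, and via one EIMC
         *	bit per MSI-X vector on newer devices.
         */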
   9886 static inline void
   9887 wm_txrxintr_disable(struct wm_queue *wmq)
   9888 {
   9889 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9890 
   9891 	if (sc->sc_type == WM_T_82574)
   9892 		CSR_WRITE(sc, WMREG_IMC,
   9893 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9894 	else if (sc->sc_type == WM_T_82575)
   9895 		CSR_WRITE(sc, WMREG_EIMC,
   9896 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9897 	else
   9898 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9899 }
   9900 
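        /*
         * wm_txrxintr_enable:
         *
         *	Recalculate the interrupt throttling rate, then unmask the
         *	Tx/Rx interrupts for one queue (the converse of
         *	wm_txrxintr_disable() above).
         */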
   9901 static inline void
   9902 wm_txrxintr_enable(struct wm_queue *wmq)
   9903 {
   9904 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9905 
   9906 	wm_itrs_calculate(sc, wmq);
   9907 
   9908 	/*
   9909 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
   9910 	 * here. It does not matter which of RXQ(0) and RXQ(1) enables
   9911 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
   9912 	 * while its wm_handle_queue(wmq) is running.
   9913 	 */
   9914 	if (sc->sc_type == WM_T_82574)
   9915 		CSR_WRITE(sc, WMREG_IMS,
   9916 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9917 	else if (sc->sc_type == WM_T_82575)
   9918 		CSR_WRITE(sc, WMREG_EIMS,
   9919 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9920 	else
   9921 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9922 }
   9923 
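        /*
         * wm_txrxintr_msix:
         *
         *	Interrupt service routine for the Tx/Rx MSI-X vector of one
         *	queue pair.
         */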
   9924 static int
   9925 wm_txrxintr_msix(void *arg)
   9926 {
   9927 	struct wm_queue *wmq = arg;
   9928 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9929 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9930 	struct wm_softc *sc = txq->txq_sc;
   9931 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9932 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9933 	bool txmore;
   9934 	bool rxmore;
   9935 
   9936 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9937 
   9938 	DPRINTF(sc, WM_DEBUG_TX,
   9939 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9940 
   9941 	wm_txrxintr_disable(wmq);
   9942 
   9943 	mutex_enter(txq->txq_lock);
   9944 
   9945 	if (txq->txq_stopping) {
   9946 		mutex_exit(txq->txq_lock);
   9947 		return 0;
   9948 	}
   9949 
   9950 	WM_Q_EVCNT_INCR(txq, txdw);
   9951 	txmore = wm_txeof(txq, txlimit);
   9952 	/* wm_deferred start() is done in wm_handle_queue(). */
   9953 	mutex_exit(txq->txq_lock);
   9954 
   9955 	DPRINTF(sc, WM_DEBUG_RX,
   9956 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9957 	mutex_enter(rxq->rxq_lock);
   9958 
   9959 	if (rxq->rxq_stopping) {
   9960 		mutex_exit(rxq->rxq_lock);
   9961 		return 0;
   9962 	}
   9963 
   9964 	WM_Q_EVCNT_INCR(rxq, intr);
   9965 	rxmore = wm_rxeof(rxq, rxlimit);
   9966 	mutex_exit(rxq->rxq_lock);
   9967 
   9968 	wm_itrs_writereg(sc, wmq);
   9969 
   9970 	if (txmore || rxmore) {
   9971 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9972 		wm_sched_handle_queue(sc, wmq);
   9973 	} else
   9974 		wm_txrxintr_enable(wmq);
   9975 
   9976 	return 1;
   9977 }
   9978 
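        /*
         * wm_handle_queue:
         *
         *	Softint/workqueue continuation of wm_txrxintr_msix(): process
         *	Tx/Rx with the (non-interrupt) sc_tx_process_limit and
         *	sc_rx_process_limit, and reschedule while work remains.
         */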
   9979 static void
   9980 wm_handle_queue(void *arg)
   9981 {
   9982 	struct wm_queue *wmq = arg;
   9983 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9984 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9985 	struct wm_softc *sc = txq->txq_sc;
   9986 	u_int txlimit = sc->sc_tx_process_limit;
   9987 	u_int rxlimit = sc->sc_rx_process_limit;
   9988 	bool txmore;
   9989 	bool rxmore;
   9990 
   9991 	mutex_enter(txq->txq_lock);
   9992 	if (txq->txq_stopping) {
   9993 		mutex_exit(txq->txq_lock);
   9994 		return;
   9995 	}
   9996 	txmore = wm_txeof(txq, txlimit);
   9997 	wm_deferred_start_locked(txq);
   9998 	mutex_exit(txq->txq_lock);
   9999 
   10000 	mutex_enter(rxq->rxq_lock);
   10001 	if (rxq->rxq_stopping) {
   10002 		mutex_exit(rxq->rxq_lock);
   10003 		return;
   10004 	}
   10005 	WM_Q_EVCNT_INCR(rxq, defer);
   10006 	rxmore = wm_rxeof(rxq, rxlimit);
   10007 	mutex_exit(rxq->rxq_lock);
   10008 
   10009 	if (txmore || rxmore) {
   10010 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10011 		wm_sched_handle_queue(sc, wmq);
   10012 	} else
   10013 		wm_txrxintr_enable(wmq);
   10014 }
   10015 
   10016 static void
   10017 wm_handle_queue_work(struct work *wk, void *context)
   10018 {
   10019 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10020 
  10021 	/*
  10022 	 * An "enqueued" flag is not required here.
  10023 	 */
   10024 	wm_handle_queue(wmq);
   10025 }
   10026 
   10027 /*
   10028  * wm_linkintr_msix:
   10029  *
   10030  *	Interrupt service routine for link status change for MSI-X.
   10031  */
   10032 static int
   10033 wm_linkintr_msix(void *arg)
   10034 {
   10035 	struct wm_softc *sc = arg;
   10036 	uint32_t reg;
   10037 	bool has_rxo;
   10038 
   10039 	reg = CSR_READ(sc, WMREG_ICR);
   10040 	WM_CORE_LOCK(sc);
   10041 	DPRINTF(sc, WM_DEBUG_LINK,
   10042 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10043 		device_xname(sc->sc_dev), reg));
   10044 
   10045 	if (sc->sc_core_stopping)
   10046 		goto out;
   10047 
   10048 	if ((reg & ICR_LSC) != 0) {
   10049 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10050 		wm_linkintr(sc, ICR_LSC);
   10051 	}
   10052 	if ((reg & ICR_GPI(0)) != 0)
   10053 		device_printf(sc->sc_dev, "got module interrupt\n");
   10054 
   10055 	/*
   10056 	 * XXX 82574 MSI-X mode workaround
   10057 	 *
  10058 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises an interrupt
  10059 	 * on the ICR_OTHER MSI-X vector and on neither the ICR_RXQ(0) nor
  10060 	 * the ICR_RXQ(1) vector. So, generate ICR_RXQ(0) and ICR_RXQ(1)
  10061 	 * interrupts by writing WMREG_ICS to get received packets processed.
   10062 	 */
   10063 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10064 #if defined(WM_DEBUG)
   10065 		log(LOG_WARNING, "%s: Receive overrun\n",
   10066 		    device_xname(sc->sc_dev));
   10067 #endif /* defined(WM_DEBUG) */
   10068 
   10069 		has_rxo = true;
  10070 		/*
  10071 		 * The RXO interrupt fires at a very high rate under heavy
  10072 		 * receive traffic, so use polling mode for ICR_OTHER, as is
  10073 		 * done for the Tx/Rx interrupts. ICR_OTHER is re-enabled at
  10074 		 * the end of wm_txrxintr_msix(), which is kicked by both the
  10075 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
  10076 		 */
   10077 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10078 
   10079 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10080 	}
   10081 
  10082 
   10084 out:
   10085 	WM_CORE_UNLOCK(sc);
   10086 
   10087 	if (sc->sc_type == WM_T_82574) {
   10088 		if (!has_rxo)
   10089 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10090 		else
   10091 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10092 	} else if (sc->sc_type == WM_T_82575)
   10093 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10094 	else
   10095 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10096 
   10097 	return 1;
   10098 }
   10099 
   10100 /*
   10101  * Media related.
   10102  * GMII, SGMII, TBI (and SERDES)
   10103  */
   10104 
   10105 /* Common */
   10106 
   10107 /*
   10108  * wm_tbi_serdes_set_linkled:
   10109  *
   10110  *	Update the link LED on TBI and SERDES devices.
   10111  */
   10112 static void
   10113 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10114 {
   10115 
   10116 	if (sc->sc_tbi_linkup)
   10117 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10118 	else
   10119 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10120 
   10121 	/* 82540 or newer devices are active low */
   10122 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10123 
   10124 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10125 }
   10126 
   10127 /* GMII related */
   10128 
   10129 /*
   10130  * wm_gmii_reset:
   10131  *
   10132  *	Reset the PHY.
   10133  */
   10134 static void
   10135 wm_gmii_reset(struct wm_softc *sc)
   10136 {
   10137 	uint32_t reg;
   10138 	int rv;
   10139 
   10140 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10141 		device_xname(sc->sc_dev), __func__));
   10142 
   10143 	rv = sc->phy.acquire(sc);
   10144 	if (rv != 0) {
   10145 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10146 		    __func__);
   10147 		return;
   10148 	}
   10149 
   10150 	switch (sc->sc_type) {
   10151 	case WM_T_82542_2_0:
   10152 	case WM_T_82542_2_1:
   10153 		/* null */
   10154 		break;
   10155 	case WM_T_82543:
   10156 		/*
   10157 		 * With 82543, we need to force speed and duplex on the MAC
   10158 		 * equal to what the PHY speed and duplex configuration is.
   10159 		 * In addition, we need to perform a hardware reset on the PHY
   10160 		 * to take it out of reset.
   10161 		 */
   10162 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10163 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10164 
   10165 		/* The PHY reset pin is active-low. */
   10166 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10167 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10168 		    CTRL_EXT_SWDPIN(4));
   10169 		reg |= CTRL_EXT_SWDPIO(4);
   10170 
   10171 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10172 		CSR_WRITE_FLUSH(sc);
   10173 		delay(10*1000);
   10174 
   10175 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10176 		CSR_WRITE_FLUSH(sc);
   10177 		delay(150);
   10178 #if 0
   10179 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10180 #endif
   10181 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10182 		break;
   10183 	case WM_T_82544:	/* Reset 10000us */
   10184 	case WM_T_82540:
   10185 	case WM_T_82545:
   10186 	case WM_T_82545_3:
   10187 	case WM_T_82546:
   10188 	case WM_T_82546_3:
   10189 	case WM_T_82541:
   10190 	case WM_T_82541_2:
   10191 	case WM_T_82547:
   10192 	case WM_T_82547_2:
   10193 	case WM_T_82571:	/* Reset 100us */
   10194 	case WM_T_82572:
   10195 	case WM_T_82573:
   10196 	case WM_T_82574:
   10197 	case WM_T_82575:
   10198 	case WM_T_82576:
   10199 	case WM_T_82580:
   10200 	case WM_T_I350:
   10201 	case WM_T_I354:
   10202 	case WM_T_I210:
   10203 	case WM_T_I211:
   10204 	case WM_T_82583:
   10205 	case WM_T_80003:
   10206 		/* Generic reset */
   10207 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10208 		CSR_WRITE_FLUSH(sc);
   10209 		delay(20000);
   10210 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10211 		CSR_WRITE_FLUSH(sc);
   10212 		delay(20000);
   10213 
   10214 		if ((sc->sc_type == WM_T_82541)
   10215 		    || (sc->sc_type == WM_T_82541_2)
   10216 		    || (sc->sc_type == WM_T_82547)
   10217 		    || (sc->sc_type == WM_T_82547_2)) {
  10218 			/* Workarounds for IGP are done in igp_reset() */
   10219 			/* XXX add code to set LED after phy reset */
   10220 		}
   10221 		break;
   10222 	case WM_T_ICH8:
   10223 	case WM_T_ICH9:
   10224 	case WM_T_ICH10:
   10225 	case WM_T_PCH:
   10226 	case WM_T_PCH2:
   10227 	case WM_T_PCH_LPT:
   10228 	case WM_T_PCH_SPT:
   10229 	case WM_T_PCH_CNP:
   10230 		/* Generic reset */
   10231 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10232 		CSR_WRITE_FLUSH(sc);
   10233 		delay(100);
   10234 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10235 		CSR_WRITE_FLUSH(sc);
   10236 		delay(150);
   10237 		break;
   10238 	default:
   10239 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10240 		    __func__);
   10241 		break;
   10242 	}
   10243 
   10244 	sc->phy.release(sc);
   10245 
   10246 	/* get_cfg_done */
   10247 	wm_get_cfg_done(sc);
   10248 
   10249 	/* Extra setup */
   10250 	switch (sc->sc_type) {
   10251 	case WM_T_82542_2_0:
   10252 	case WM_T_82542_2_1:
   10253 	case WM_T_82543:
   10254 	case WM_T_82544:
   10255 	case WM_T_82540:
   10256 	case WM_T_82545:
   10257 	case WM_T_82545_3:
   10258 	case WM_T_82546:
   10259 	case WM_T_82546_3:
   10260 	case WM_T_82541_2:
   10261 	case WM_T_82547_2:
   10262 	case WM_T_82571:
   10263 	case WM_T_82572:
   10264 	case WM_T_82573:
   10265 	case WM_T_82574:
   10266 	case WM_T_82583:
   10267 	case WM_T_82575:
   10268 	case WM_T_82576:
   10269 	case WM_T_82580:
   10270 	case WM_T_I350:
   10271 	case WM_T_I354:
   10272 	case WM_T_I210:
   10273 	case WM_T_I211:
   10274 	case WM_T_80003:
   10275 		/* Null */
   10276 		break;
   10277 	case WM_T_82541:
   10278 	case WM_T_82547:
  10279 		/* XXX Configure the LED after PHY reset */
   10280 		break;
   10281 	case WM_T_ICH8:
   10282 	case WM_T_ICH9:
   10283 	case WM_T_ICH10:
   10284 	case WM_T_PCH:
   10285 	case WM_T_PCH2:
   10286 	case WM_T_PCH_LPT:
   10287 	case WM_T_PCH_SPT:
   10288 	case WM_T_PCH_CNP:
   10289 		wm_phy_post_reset(sc);
   10290 		break;
   10291 	default:
   10292 		panic("%s: unknown type\n", __func__);
   10293 		break;
   10294 	}
   10295 }
   10296 
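        /*
         * A sketch of the intended wm_gmii_setup_phytype() call flow (the
         * first call happens earlier, at attach time, before any PHY
         * register can be read):
         *
         *	wm_gmii_setup_phytype(sc, 0, 0);       // 1st: guess from PCI ID
         *	(mii_attach() probes and identifies the PHY)
         *	wm_gmii_setup_phytype(sc, oui, model); // 2nd: refine from PHY ID
         */
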
   10297 /*
  10298  * Set up sc_phytype and mii_{read|write}reg.
  10299  *
  10300  *  To identify the PHY type, the correct read/write functions must be
  10301  * selected, and selecting them requires the PCI ID or MAC type, since
  10302  * the PHY registers cannot be accessed yet.
  10303  *
  10304  *  On the first call of this function, the PHY ID is not yet known, so
  10305  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
  10306  * so the result might be incorrect.
  10307  *
  10308  *  On the second call, the PHY OUI and model are used to identify the
  10309  * PHY type. This may still be imperfect because of missing table
  10310  * entries, but it is better than the first guess.
  10311  *
  10312  *  If the newly detected result differs from the previous assumption, a
  10313  * diagnostic message is printed.
   10314  */
   10315 static void
   10316 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10317     uint16_t phy_model)
   10318 {
   10319 	device_t dev = sc->sc_dev;
   10320 	struct mii_data *mii = &sc->sc_mii;
   10321 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10322 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10323 	mii_readreg_t new_readreg;
   10324 	mii_writereg_t new_writereg;
   10325 	bool dodiag = true;
   10326 
   10327 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10328 		device_xname(sc->sc_dev), __func__));
   10329 
  10330 	/*
  10331 	 * A 1000BASE-T SFP uses SGMII, and the first assumed PHY type is
  10332 	 * always incorrect, so don't print diag output on the second call.
  10333 	 */
   10334 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10335 		dodiag = false;
   10336 
   10337 	if (mii->mii_readreg == NULL) {
   10338 		/*
   10339 		 *  This is the first call of this function. For ICH and PCH
   10340 		 * variants, it's difficult to determine the PHY access method
   10341 		 * by sc_type, so use the PCI product ID for some devices.
   10342 		 */
   10343 
   10344 		switch (sc->sc_pcidevid) {
   10345 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10346 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10347 			/* 82577 */
   10348 			new_phytype = WMPHY_82577;
   10349 			break;
   10350 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10351 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10352 			/* 82578 */
   10353 			new_phytype = WMPHY_82578;
   10354 			break;
   10355 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10356 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10357 			/* 82579 */
   10358 			new_phytype = WMPHY_82579;
   10359 			break;
   10360 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10361 		case PCI_PRODUCT_INTEL_82801I_BM:
   10362 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10363 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10364 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10365 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10366 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10367 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10368 			/* ICH8, 9, 10 with 82567 */
   10369 			new_phytype = WMPHY_BM;
   10370 			break;
   10371 		default:
   10372 			break;
   10373 		}
   10374 	} else {
   10375 		/* It's not the first call. Use PHY OUI and model */
   10376 		switch (phy_oui) {
   10377 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10378 			switch (phy_model) {
   10379 			case 0x0004: /* XXX */
   10380 				new_phytype = WMPHY_82578;
   10381 				break;
   10382 			default:
   10383 				break;
   10384 			}
   10385 			break;
   10386 		case MII_OUI_xxMARVELL:
   10387 			switch (phy_model) {
   10388 			case MII_MODEL_xxMARVELL_I210:
   10389 				new_phytype = WMPHY_I210;
   10390 				break;
   10391 			case MII_MODEL_xxMARVELL_E1011:
   10392 			case MII_MODEL_xxMARVELL_E1000_3:
   10393 			case MII_MODEL_xxMARVELL_E1000_5:
   10394 			case MII_MODEL_xxMARVELL_E1112:
   10395 				new_phytype = WMPHY_M88;
   10396 				break;
   10397 			case MII_MODEL_xxMARVELL_E1149:
   10398 				new_phytype = WMPHY_BM;
   10399 				break;
   10400 			case MII_MODEL_xxMARVELL_E1111:
   10401 			case MII_MODEL_xxMARVELL_I347:
   10402 			case MII_MODEL_xxMARVELL_E1512:
   10403 			case MII_MODEL_xxMARVELL_E1340M:
   10404 			case MII_MODEL_xxMARVELL_E1543:
   10405 				new_phytype = WMPHY_M88;
   10406 				break;
   10407 			case MII_MODEL_xxMARVELL_I82563:
   10408 				new_phytype = WMPHY_GG82563;
   10409 				break;
   10410 			default:
   10411 				break;
   10412 			}
   10413 			break;
   10414 		case MII_OUI_INTEL:
   10415 			switch (phy_model) {
   10416 			case MII_MODEL_INTEL_I82577:
   10417 				new_phytype = WMPHY_82577;
   10418 				break;
   10419 			case MII_MODEL_INTEL_I82579:
   10420 				new_phytype = WMPHY_82579;
   10421 				break;
   10422 			case MII_MODEL_INTEL_I217:
   10423 				new_phytype = WMPHY_I217;
   10424 				break;
   10425 			case MII_MODEL_INTEL_I82580:
   10426 				new_phytype = WMPHY_82580;
   10427 				break;
   10428 			case MII_MODEL_INTEL_I350:
   10429 				new_phytype = WMPHY_I350;
  10430 				break;
   10432 			default:
   10433 				break;
   10434 			}
   10435 			break;
   10436 		case MII_OUI_yyINTEL:
   10437 			switch (phy_model) {
   10438 			case MII_MODEL_yyINTEL_I82562G:
   10439 			case MII_MODEL_yyINTEL_I82562EM:
   10440 			case MII_MODEL_yyINTEL_I82562ET:
   10441 				new_phytype = WMPHY_IFE;
   10442 				break;
   10443 			case MII_MODEL_yyINTEL_IGP01E1000:
   10444 				new_phytype = WMPHY_IGP;
   10445 				break;
   10446 			case MII_MODEL_yyINTEL_I82566:
   10447 				new_phytype = WMPHY_IGP_3;
   10448 				break;
   10449 			default:
   10450 				break;
   10451 			}
   10452 			break;
   10453 		default:
   10454 			break;
   10455 		}
   10456 
   10457 		if (dodiag) {
   10458 			if (new_phytype == WMPHY_UNKNOWN)
   10459 				aprint_verbose_dev(dev,
   10460 				    "%s: Unknown PHY model. OUI=%06x, "
   10461 				    "model=%04x\n", __func__, phy_oui,
   10462 				    phy_model);
   10463 
   10464 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10465 			    && (sc->sc_phytype != new_phytype)) {
   10466 				aprint_error_dev(dev, "Previously assumed PHY "
   10467 				    "type(%u) was incorrect. PHY type from PHY"
   10468 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10469 			}
   10470 		}
   10471 	}
   10472 
   10473 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10474 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10475 		/* SGMII */
   10476 		new_readreg = wm_sgmii_readreg;
   10477 		new_writereg = wm_sgmii_writereg;
   10478 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10479 		/* BM2 (phyaddr == 1) */
   10480 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10481 		    && (new_phytype != WMPHY_BM)
   10482 		    && (new_phytype != WMPHY_UNKNOWN))
   10483 			doubt_phytype = new_phytype;
   10484 		new_phytype = WMPHY_BM;
   10485 		new_readreg = wm_gmii_bm_readreg;
   10486 		new_writereg = wm_gmii_bm_writereg;
   10487 	} else if (sc->sc_type >= WM_T_PCH) {
   10488 		/* All PCH* use _hv_ */
   10489 		new_readreg = wm_gmii_hv_readreg;
   10490 		new_writereg = wm_gmii_hv_writereg;
   10491 	} else if (sc->sc_type >= WM_T_ICH8) {
   10492 		/* non-82567 ICH8, 9 and 10 */
   10493 		new_readreg = wm_gmii_i82544_readreg;
   10494 		new_writereg = wm_gmii_i82544_writereg;
   10495 	} else if (sc->sc_type >= WM_T_80003) {
   10496 		/* 80003 */
   10497 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10498 		    && (new_phytype != WMPHY_GG82563)
   10499 		    && (new_phytype != WMPHY_UNKNOWN))
   10500 			doubt_phytype = new_phytype;
   10501 		new_phytype = WMPHY_GG82563;
   10502 		new_readreg = wm_gmii_i80003_readreg;
   10503 		new_writereg = wm_gmii_i80003_writereg;
   10504 	} else if (sc->sc_type >= WM_T_I210) {
   10505 		/* I210 and I211 */
   10506 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10507 		    && (new_phytype != WMPHY_I210)
   10508 		    && (new_phytype != WMPHY_UNKNOWN))
   10509 			doubt_phytype = new_phytype;
   10510 		new_phytype = WMPHY_I210;
   10511 		new_readreg = wm_gmii_gs40g_readreg;
   10512 		new_writereg = wm_gmii_gs40g_writereg;
   10513 	} else if (sc->sc_type >= WM_T_82580) {
   10514 		/* 82580, I350 and I354 */
   10515 		new_readreg = wm_gmii_82580_readreg;
   10516 		new_writereg = wm_gmii_82580_writereg;
   10517 	} else if (sc->sc_type >= WM_T_82544) {
  10518 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10519 		new_readreg = wm_gmii_i82544_readreg;
   10520 		new_writereg = wm_gmii_i82544_writereg;
   10521 	} else {
   10522 		new_readreg = wm_gmii_i82543_readreg;
   10523 		new_writereg = wm_gmii_i82543_writereg;
   10524 	}
   10525 
   10526 	if (new_phytype == WMPHY_BM) {
   10527 		/* All BM use _bm_ */
   10528 		new_readreg = wm_gmii_bm_readreg;
   10529 		new_writereg = wm_gmii_bm_writereg;
   10530 	}
   10531 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10532 		/* All PCH* use _hv_ */
   10533 		new_readreg = wm_gmii_hv_readreg;
   10534 		new_writereg = wm_gmii_hv_writereg;
   10535 	}
   10536 
   10537 	/* Diag output */
   10538 	if (dodiag) {
   10539 		if (doubt_phytype != WMPHY_UNKNOWN)
   10540 			aprint_error_dev(dev, "Assumed new PHY type was "
   10541 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10542 			    new_phytype);
   10543 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10544 		    && (sc->sc_phytype != new_phytype))
   10545 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   10546 			    "was incorrect. New PHY type = %u\n",
   10547 			    sc->sc_phytype, new_phytype);
   10548 
   10549 		if ((mii->mii_readreg != NULL) &&
   10550 		    (new_phytype == WMPHY_UNKNOWN))
   10551 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10552 
   10553 		if ((mii->mii_readreg != NULL) &&
   10554 		    (mii->mii_readreg != new_readreg))
   10555 			aprint_error_dev(dev, "Previously assumed PHY "
   10556 			    "read/write function was incorrect.\n");
   10557 	}
   10558 
   10559 	/* Update now */
   10560 	sc->sc_phytype = new_phytype;
   10561 	mii->mii_readreg = new_readreg;
   10562 	mii->mii_writereg = new_writereg;
   10563 	if (new_readreg == wm_gmii_hv_readreg) {
   10564 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10565 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10566 	} else if (new_readreg == wm_sgmii_readreg) {
   10567 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10568 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10569 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10570 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10571 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10572 	}
   10573 }
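
        /*
         * Example (a sketch, not driver code): reading a PHY register
         * through the accessors selected above, for a PHY at address 1:
         *
         *	uint16_t bmsr;
         *	if (sc->sc_mii.mii_readreg(sc->sc_dev, 1, MII_BMSR, &bmsr) == 0)
         *		(bmsr now holds the PHY status register)
         */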
   10574 
   10575 /*
   10576  * wm_get_phy_id_82575:
   10577  *
  10578  *	Return the PHY ID, or -1 on failure.
   10579  */
   10580 static int
   10581 wm_get_phy_id_82575(struct wm_softc *sc)
   10582 {
   10583 	uint32_t reg;
   10584 	int phyid = -1;
   10585 
   10586 	/* XXX */
   10587 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10588 		return -1;
   10589 
   10590 	if (wm_sgmii_uses_mdio(sc)) {
   10591 		switch (sc->sc_type) {
   10592 		case WM_T_82575:
   10593 		case WM_T_82576:
   10594 			reg = CSR_READ(sc, WMREG_MDIC);
   10595 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10596 			break;
   10597 		case WM_T_82580:
   10598 		case WM_T_I350:
   10599 		case WM_T_I354:
   10600 		case WM_T_I210:
   10601 		case WM_T_I211:
   10602 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10603 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10604 			break;
   10605 		default:
   10606 			return -1;
   10607 		}
   10608 	}
   10609 
   10610 	return phyid;
   10611 }
   10612 
   10613 /*
   10614  * wm_gmii_mediainit:
   10615  *
   10616  *	Initialize media for use on 1000BASE-T devices.
   10617  */
   10618 static void
   10619 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10620 {
   10621 	device_t dev = sc->sc_dev;
   10622 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10623 	struct mii_data *mii = &sc->sc_mii;
   10624 
   10625 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10626 		device_xname(sc->sc_dev), __func__));
   10627 
   10628 	/* We have GMII. */
   10629 	sc->sc_flags |= WM_F_HAS_MII;
   10630 
   10631 	if (sc->sc_type == WM_T_80003)
  10632 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10633 	else
   10634 		sc->sc_tipg = TIPG_1000T_DFLT;
   10635 
   10636 	/*
   10637 	 * Let the chip set speed/duplex on its own based on
   10638 	 * signals from the PHY.
   10639 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10640 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10641 	 */
   10642 	sc->sc_ctrl |= CTRL_SLU;
   10643 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10644 
   10645 	/* Initialize our media structures and probe the GMII. */
   10646 	mii->mii_ifp = ifp;
   10647 
   10648 	mii->mii_statchg = wm_gmii_statchg;
   10649 
   10650 	/* get PHY control from SMBus to PCIe */
   10651 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10652 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10653 	    || (sc->sc_type == WM_T_PCH_CNP))
   10654 		wm_init_phy_workarounds_pchlan(sc);
   10655 
   10656 	wm_gmii_reset(sc);
   10657 
   10658 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10659 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10660 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10661 
   10662 	/* Setup internal SGMII PHY for SFP */
   10663 	wm_sgmii_sfp_preconfig(sc);
   10664 
   10665 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10666 	    || (sc->sc_type == WM_T_82580)
   10667 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10668 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10669 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10670 			/* Attach only one port */
   10671 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10672 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10673 		} else {
   10674 			int i, id;
   10675 			uint32_t ctrl_ext;
   10676 
   10677 			id = wm_get_phy_id_82575(sc);
   10678 			if (id != -1) {
   10679 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10680 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10681 			}
   10682 			if ((id == -1)
   10683 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10684 				/* Power on sgmii phy if it is disabled */
   10685 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10686 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10687 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10688 				CSR_WRITE_FLUSH(sc);
   10689 				delay(300*1000); /* XXX too long */
   10690 
  10691 				/*
  10692 				 * Scan PHY addresses 1 through 7.
  10693 				 *
  10694 				 * I2C access can fail with the I2C register's
  10695 				 * ERROR bit set, so suppress error messages
  10696 				 * while scanning.
  10697 				 */
   10698 				sc->phy.no_errprint = true;
   10699 				for (i = 1; i < 8; i++)
   10700 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10701 					    0xffffffff, i, MII_OFFSET_ANY,
   10702 					    MIIF_DOPAUSE);
   10703 				sc->phy.no_errprint = false;
   10704 
   10705 				/* Restore previous sfp cage power state */
   10706 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10707 			}
   10708 		}
   10709 	} else
   10710 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10711 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10712 
   10713 	/*
   10714 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10715 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10716 	 */
   10717 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10718 		|| (sc->sc_type == WM_T_PCH_SPT)
   10719 		|| (sc->sc_type == WM_T_PCH_CNP))
   10720 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10721 		wm_set_mdio_slow_mode_hv(sc);
   10722 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10723 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10724 	}
   10725 
   10726 	/*
   10727 	 * (For ICH8 variants)
   10728 	 * If PHY detection failed, use BM's r/w function and retry.
   10729 	 */
   10730 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10731 		/* if failed, retry with *_bm_* */
   10732 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10733 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10734 		    sc->sc_phytype);
   10735 		sc->sc_phytype = WMPHY_BM;
   10736 		mii->mii_readreg = wm_gmii_bm_readreg;
   10737 		mii->mii_writereg = wm_gmii_bm_writereg;
   10738 
   10739 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10740 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10741 	}
   10742 
   10743 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
  10744 		/* No PHY was found */
   10745 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10746 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10747 		sc->sc_phytype = WMPHY_NONE;
   10748 	} else {
   10749 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10750 
  10751 		/*
  10752 		 * PHY found. Re-check the PHY type with a second call to
  10753 		 * wm_gmii_setup_phytype().
  10754 		 */
   10755 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10756 		    child->mii_mpd_model);
   10757 
   10758 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10759 	}
   10760 }
   10761 
   10762 /*
   10763  * wm_gmii_mediachange:	[ifmedia interface function]
   10764  *
   10765  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10766  */
   10767 static int
   10768 wm_gmii_mediachange(struct ifnet *ifp)
   10769 {
   10770 	struct wm_softc *sc = ifp->if_softc;
   10771 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10772 	uint32_t reg;
   10773 	int rc;
   10774 
   10775 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10776 		device_xname(sc->sc_dev), __func__));
   10777 	if ((ifp->if_flags & IFF_UP) == 0)
   10778 		return 0;
   10779 
   10780 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10781 	if ((sc->sc_type == WM_T_82580)
   10782 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10783 	    || (sc->sc_type == WM_T_I211)) {
   10784 		reg = CSR_READ(sc, WMREG_PHPM);
   10785 		reg &= ~PHPM_GO_LINK_D;
   10786 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10787 	}
   10788 
   10789 	/* Disable D0 LPLU. */
   10790 	wm_lplu_d0_disable(sc);
   10791 
   10792 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10793 	sc->sc_ctrl |= CTRL_SLU;
   10794 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10795 	    || (sc->sc_type > WM_T_82543)) {
   10796 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10797 	} else {
   10798 		sc->sc_ctrl &= ~CTRL_ASDE;
   10799 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10800 		if (ife->ifm_media & IFM_FDX)
   10801 			sc->sc_ctrl |= CTRL_FD;
   10802 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10803 		case IFM_10_T:
   10804 			sc->sc_ctrl |= CTRL_SPEED_10;
   10805 			break;
   10806 		case IFM_100_TX:
   10807 			sc->sc_ctrl |= CTRL_SPEED_100;
   10808 			break;
   10809 		case IFM_1000_T:
   10810 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10811 			break;
   10812 		case IFM_NONE:
   10813 			/* There is no specific setting for IFM_NONE */
   10814 			break;
   10815 		default:
   10816 			panic("wm_gmii_mediachange: bad media 0x%x",
   10817 			    ife->ifm_media);
   10818 		}
   10819 	}
   10820 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10821 	CSR_WRITE_FLUSH(sc);
   10822 
   10823 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10824 		wm_serdes_mediachange(ifp);
   10825 
   10826 	if (sc->sc_type <= WM_T_82543)
   10827 		wm_gmii_reset(sc);
   10828 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10829 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
  10830 		/* Allow time for the SFP cage to power up the PHY */
   10831 		delay(300 * 1000);
   10832 		wm_gmii_reset(sc);
   10833 	}
   10834 
   10835 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10836 		return 0;
   10837 	return rc;
   10838 }
   10839 
   10840 /*
   10841  * wm_gmii_mediastatus:	[ifmedia interface function]
   10842  *
   10843  *	Get the current interface media status on a 1000BASE-T device.
   10844  */
   10845 static void
   10846 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10847 {
   10848 	struct wm_softc *sc = ifp->if_softc;
   10849 
   10850 	ether_mediastatus(ifp, ifmr);
   10851 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10852 	    | sc->sc_flowflags;
   10853 }
   10854 
   10855 #define	MDI_IO		CTRL_SWDPIN(2)
   10856 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10857 #define	MDI_CLK		CTRL_SWDPIN(3)
   10858 
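        /*
         * wm_i82543_mii_sendbits:
         *
         *	Clock out `nbits' bits of an MII management frame, MSB first,
         *	by bit-banging the software-controlled MDI_IO/MDI_CLK pins;
         *	on the 82543, MII access goes through these pins rather than
         *	the MDIC register.
         */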
   10859 static void
   10860 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10861 {
   10862 	uint32_t i, v;
   10863 
   10864 	v = CSR_READ(sc, WMREG_CTRL);
   10865 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10866 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10867 
   10868 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10869 		if (data & i)
   10870 			v |= MDI_IO;
   10871 		else
   10872 			v &= ~MDI_IO;
   10873 		CSR_WRITE(sc, WMREG_CTRL, v);
   10874 		CSR_WRITE_FLUSH(sc);
   10875 		delay(10);
   10876 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10877 		CSR_WRITE_FLUSH(sc);
   10878 		delay(10);
   10879 		CSR_WRITE(sc, WMREG_CTRL, v);
   10880 		CSR_WRITE_FLUSH(sc);
   10881 		delay(10);
   10882 	}
   10883 }
   10884 
   10885 static uint16_t
   10886 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10887 {
   10888 	uint32_t v, i;
   10889 	uint16_t data = 0;
   10890 
   10891 	v = CSR_READ(sc, WMREG_CTRL);
   10892 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10893 	v |= CTRL_SWDPIO(3);
   10894 
   10895 	CSR_WRITE(sc, WMREG_CTRL, v);
   10896 	CSR_WRITE_FLUSH(sc);
   10897 	delay(10);
   10898 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10899 	CSR_WRITE_FLUSH(sc);
   10900 	delay(10);
   10901 	CSR_WRITE(sc, WMREG_CTRL, v);
   10902 	CSR_WRITE_FLUSH(sc);
   10903 	delay(10);
   10904 
   10905 	for (i = 0; i < 16; i++) {
   10906 		data <<= 1;
   10907 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10908 		CSR_WRITE_FLUSH(sc);
   10909 		delay(10);
   10910 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10911 			data |= 1;
   10912 		CSR_WRITE(sc, WMREG_CTRL, v);
   10913 		CSR_WRITE_FLUSH(sc);
   10914 		delay(10);
   10915 	}
   10916 
   10917 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10918 	CSR_WRITE_FLUSH(sc);
   10919 	delay(10);
   10920 	CSR_WRITE(sc, WMREG_CTRL, v);
   10921 	CSR_WRITE_FLUSH(sc);
   10922 	delay(10);
   10923 
   10924 	return data;
   10925 }
   10926 
   10927 #undef MDI_IO
   10928 #undef MDI_DIR
   10929 #undef MDI_CLK
   10930 
   10931 /*
   10932  * wm_gmii_i82543_readreg:	[mii interface function]
   10933  *
   10934  *	Read a PHY register on the GMII (i82543 version).
   10935  */
   10936 static int
   10937 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10938 {
   10939 	struct wm_softc *sc = device_private(dev);
   10940 
   10941 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10942 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10943 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10944 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10945 
   10946 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10947 		device_xname(dev), phy, reg, *val));
   10948 
   10949 	return 0;
   10950 }
   10951 
   10952 /*
   10953  * wm_gmii_i82543_writereg:	[mii interface function]
   10954  *
   10955  *	Write a PHY register on the GMII (i82543 version).
   10956  */
   10957 static int
   10958 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10959 {
   10960 	struct wm_softc *sc = device_private(dev);
   10961 
   10962 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10963 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10964 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10965 	    (MII_COMMAND_START << 30), 32);
   10966 
   10967 	return 0;
   10968 }
   10969 
   10970 /*
   10971  * wm_gmii_mdic_readreg:	[mii interface function]
   10972  *
   10973  *	Read a PHY register on the GMII.
   10974  */
   10975 static int
   10976 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10977 {
   10978 	struct wm_softc *sc = device_private(dev);
   10979 	uint32_t mdic = 0;
   10980 	int i;
   10981 
   10982 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10983 	    && (reg > MII_ADDRMASK)) {
   10984 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10985 		    __func__, sc->sc_phytype, reg);
   10986 		reg &= MII_ADDRMASK;
   10987 	}
   10988 
   10989 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10990 	    MDIC_REGADD(reg));
   10991 
   10992 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10993 		delay(50);
   10994 		mdic = CSR_READ(sc, WMREG_MDIC);
   10995 		if (mdic & MDIC_READY)
   10996 			break;
   10997 	}
   10998 
   10999 	if ((mdic & MDIC_READY) == 0) {
   11000 		DPRINTF(sc, WM_DEBUG_GMII,
   11001 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11002 			device_xname(dev), phy, reg));
   11003 		return ETIMEDOUT;
   11004 	} else if (mdic & MDIC_E) {
   11005 		/* This is normal if no PHY is present. */
   11006 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   11007 			device_xname(sc->sc_dev), phy, reg));
   11008 		return -1;
   11009 	} else
   11010 		*val = MDIC_DATA(mdic);
   11011 
   11012 	/*
   11013 	 * Allow some time after each MDIC transaction to avoid
   11014 	 * reading duplicate data in the next MDIC transaction.
   11015 	 */
   11016 	if (sc->sc_type == WM_T_PCH2)
   11017 		delay(100);
   11018 
   11019 	return 0;
   11020 }
   11021 
   11022 /*
   11023  * wm_gmii_mdic_writereg:	[mii interface function]
   11024  *
   11025  *	Write a PHY register on the GMII.
   11026  */
   11027 static int
   11028 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11029 {
   11030 	struct wm_softc *sc = device_private(dev);
   11031 	uint32_t mdic = 0;
   11032 	int i;
   11033 
   11034 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11035 	    && (reg > MII_ADDRMASK)) {
   11036 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11037 		    __func__, sc->sc_phytype, reg);
   11038 		reg &= MII_ADDRMASK;
   11039 	}
   11040 
   11041 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11042 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11043 
   11044 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11045 		delay(50);
   11046 		mdic = CSR_READ(sc, WMREG_MDIC);
   11047 		if (mdic & MDIC_READY)
   11048 			break;
   11049 	}
   11050 
   11051 	if ((mdic & MDIC_READY) == 0) {
   11052 		DPRINTF(sc, WM_DEBUG_GMII,
   11053 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11054 			device_xname(dev), phy, reg));
   11055 		return ETIMEDOUT;
   11056 	} else if (mdic & MDIC_E) {
   11057 		DPRINTF(sc, WM_DEBUG_GMII,
   11058 		    ("%s: MDIC write error: phy %d reg %d\n",
   11059 			device_xname(dev), phy, reg));
   11060 		return -1;
   11061 	}
   11062 
   11063 	/*
   11064 	 * Allow some time after each MDIC transaction to avoid
   11065 	 * reading duplicate data in the next MDIC transaction.
   11066 	 */
   11067 	if (sc->sc_type == WM_T_PCH2)
   11068 		delay(100);
   11069 
   11070 	return 0;
   11071 }
   11072 
   11073 /*
   11074  * wm_gmii_i82544_readreg:	[mii interface function]
   11075  *
   11076  *	Read a PHY register on the GMII.
   11077  */
   11078 static int
   11079 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11080 {
   11081 	struct wm_softc *sc = device_private(dev);
   11082 	int rv;
   11083 
   11084 	if (sc->phy.acquire(sc)) {
   11085 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11086 		return -1;
   11087 	}
   11088 
   11089 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11090 
   11091 	sc->phy.release(sc);
   11092 
   11093 	return rv;
   11094 }
   11095 
   11096 static int
   11097 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11098 {
   11099 	struct wm_softc *sc = device_private(dev);
   11100 	int rv;
   11101 
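        	/*
        	 * Registers above BME1000_MAX_MULTI_PAGE_REG are reached by
        	 * first writing the full register number to the IGP page
        	 * select register; only the IGP PHY families need this here.
        	 */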
   11102 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11103 		switch (sc->sc_phytype) {
   11104 		case WMPHY_IGP:
   11105 		case WMPHY_IGP_2:
   11106 		case WMPHY_IGP_3:
   11107 			rv = wm_gmii_mdic_writereg(dev, phy,
   11108 			    IGPHY_PAGE_SELECT, reg);
   11109 			if (rv != 0)
   11110 				return rv;
   11111 			break;
   11112 		default:
   11113 #ifdef WM_DEBUG
   11114 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11115 			    __func__, sc->sc_phytype, reg);
   11116 #endif
   11117 			break;
   11118 		}
   11119 	}
   11120 
   11121 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11122 }
   11123 
   11124 /*
   11125  * wm_gmii_i82544_writereg:	[mii interface function]
   11126  *
   11127  *	Write a PHY register on the GMII.
   11128  */
   11129 static int
   11130 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11131 {
   11132 	struct wm_softc *sc = device_private(dev);
   11133 	int rv;
   11134 
   11135 	if (sc->phy.acquire(sc)) {
   11136 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11137 		return -1;
   11138 	}
   11139 
   11140 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11141 	sc->phy.release(sc);
   11142 
   11143 	return rv;
   11144 }
   11145 
   11146 static int
   11147 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11148 {
   11149 	struct wm_softc *sc = device_private(dev);
   11150 	int rv;
   11151 
   11152 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11153 		switch (sc->sc_phytype) {
   11154 		case WMPHY_IGP:
   11155 		case WMPHY_IGP_2:
   11156 		case WMPHY_IGP_3:
   11157 			rv = wm_gmii_mdic_writereg(dev, phy,
   11158 			    IGPHY_PAGE_SELECT, reg);
   11159 			if (rv != 0)
   11160 				return rv;
   11161 			break;
   11162 		default:
   11163 #ifdef WM_DEBUG
    11164 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11165 			    __func__, sc->sc_phytype, reg);
   11166 #endif
   11167 			break;
   11168 		}
   11169 	}
   11170 
   11171 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11172 }
   11173 
   11174 /*
   11175  * wm_gmii_i80003_readreg:	[mii interface function]
   11176  *
   11177  *	Read a PHY register on the kumeran
   11178  * This could be handled by the PHY layer if we didn't have to lock the
   11179  * resource ...
   11180  */
   11181 static int
   11182 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11183 {
   11184 	struct wm_softc *sc = device_private(dev);
   11185 	int page_select;
   11186 	uint16_t temp, temp2;
   11187 	int rv = 0;
   11188 
   11189 	if (phy != 1) /* Only one PHY on kumeran bus */
   11190 		return -1;
   11191 
   11192 	if (sc->phy.acquire(sc)) {
   11193 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11194 		return -1;
   11195 	}
   11196 
   11197 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11198 		page_select = GG82563_PHY_PAGE_SELECT;
   11199 	else {
   11200 		/*
   11201 		 * Use Alternative Page Select register to access registers
   11202 		 * 30 and 31.
   11203 		 */
   11204 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11205 	}
   11206 	temp = reg >> GG82563_PAGE_SHIFT;
   11207 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11208 		goto out;
   11209 
   11210 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11211 		/*
    11212 		 * Wait an additional 200us to work around a bug with the
    11213 		 * ready bit in the MDIC register.
   11214 		 */
   11215 		delay(200);
   11216 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11217 		if ((rv != 0) || (temp2 != temp)) {
   11218 			device_printf(dev, "%s failed\n", __func__);
   11219 			rv = -1;
   11220 			goto out;
   11221 		}
   11222 		delay(200);
   11223 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11224 		delay(200);
   11225 	} else
   11226 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11227 
   11228 out:
   11229 	sc->phy.release(sc);
   11230 	return rv;
   11231 }
   11232 
   11233 /*
   11234  * wm_gmii_i80003_writereg:	[mii interface function]
   11235  *
   11236  *	Write a PHY register on the kumeran.
   11237  * This could be handled by the PHY layer if we didn't have to lock the
   11238  * resource ...
   11239  */
   11240 static int
   11241 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11242 {
   11243 	struct wm_softc *sc = device_private(dev);
   11244 	int page_select, rv;
   11245 	uint16_t temp, temp2;
   11246 
   11247 	if (phy != 1) /* Only one PHY on kumeran bus */
   11248 		return -1;
   11249 
   11250 	if (sc->phy.acquire(sc)) {
   11251 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11252 		return -1;
   11253 	}
   11254 
   11255 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11256 		page_select = GG82563_PHY_PAGE_SELECT;
   11257 	else {
   11258 		/*
   11259 		 * Use Alternative Page Select register to access registers
   11260 		 * 30 and 31.
   11261 		 */
   11262 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11263 	}
   11264 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11265 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11266 		goto out;
   11267 
   11268 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11269 		/*
    11270 		 * Wait an additional 200us to work around a bug with the
    11271 		 * ready bit in the MDIC register.
   11272 		 */
   11273 		delay(200);
   11274 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11275 		if ((rv != 0) || (temp2 != temp)) {
   11276 			device_printf(dev, "%s failed\n", __func__);
   11277 			rv = -1;
   11278 			goto out;
   11279 		}
   11280 		delay(200);
   11281 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11282 		delay(200);
   11283 	} else
   11284 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11285 
   11286 out:
   11287 	sc->phy.release(sc);
   11288 	return rv;
   11289 }
   11290 
   11291 /*
   11292  * wm_gmii_bm_readreg:	[mii interface function]
   11293  *
    11294  *	Read a PHY register on a BM PHY.
   11295  * This could be handled by the PHY layer if we didn't have to lock the
   11296  * resource ...
   11297  */
   11298 static int
   11299 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11300 {
   11301 	struct wm_softc *sc = device_private(dev);
   11302 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11303 	int rv;
   11304 
   11305 	if (sc->phy.acquire(sc)) {
   11306 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11307 		return -1;
   11308 	}
   11309 
   11310 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11311 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11312 		    || (reg == 31)) ? 1 : phy;
   11313 	/* Page 800 works differently than the rest so it has its own func */
   11314 	if (page == BM_WUC_PAGE) {
   11315 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11316 		goto release;
   11317 	}
   11318 
   11319 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11320 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11321 		    && (sc->sc_type != WM_T_82583))
   11322 			rv = wm_gmii_mdic_writereg(dev, phy,
   11323 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11324 		else
   11325 			rv = wm_gmii_mdic_writereg(dev, phy,
   11326 			    BME1000_PHY_PAGE_SELECT, page);
   11327 		if (rv != 0)
   11328 			goto release;
   11329 	}
   11330 
   11331 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11332 
   11333 release:
   11334 	sc->phy.release(sc);
   11335 	return rv;
   11336 }
   11337 
   11338 /*
   11339  * wm_gmii_bm_writereg:	[mii interface function]
   11340  *
    11341  *	Write a PHY register on a BM PHY.
   11342  * This could be handled by the PHY layer if we didn't have to lock the
   11343  * resource ...
   11344  */
   11345 static int
   11346 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11347 {
   11348 	struct wm_softc *sc = device_private(dev);
   11349 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11350 	int rv;
   11351 
   11352 	if (sc->phy.acquire(sc)) {
   11353 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11354 		return -1;
   11355 	}
   11356 
   11357 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11358 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11359 		    || (reg == 31)) ? 1 : phy;
   11360 	/* Page 800 works differently than the rest so it has its own func */
   11361 	if (page == BM_WUC_PAGE) {
   11362 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11363 		goto release;
   11364 	}
   11365 
   11366 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11367 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11368 		    && (sc->sc_type != WM_T_82583))
   11369 			rv = wm_gmii_mdic_writereg(dev, phy,
   11370 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11371 		else
   11372 			rv = wm_gmii_mdic_writereg(dev, phy,
   11373 			    BME1000_PHY_PAGE_SELECT, page);
   11374 		if (rv != 0)
   11375 			goto release;
   11376 	}
   11377 
   11378 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11379 
   11380 release:
   11381 	sc->phy.release(sc);
   11382 	return rv;
   11383 }
   11384 
   11385 /*
   11386  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11387  *  @dev: pointer to the HW structure
   11388  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11389  *
   11390  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11391  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11392  */
   11393 static int
   11394 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11395 {
   11396 #ifdef WM_DEBUG
   11397 	struct wm_softc *sc = device_private(dev);
   11398 #endif
   11399 	uint16_t temp;
   11400 	int rv;
   11401 
   11402 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11403 		device_xname(dev), __func__));
   11404 
   11405 	if (!phy_regp)
   11406 		return -1;
   11407 
   11408 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11409 
   11410 	/* Select Port Control Registers page */
   11411 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11412 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11413 	if (rv != 0)
   11414 		return rv;
   11415 
   11416 	/* Read WUCE and save it */
   11417 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11418 	if (rv != 0)
   11419 		return rv;
   11420 
   11421 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11422 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11423 	 */
   11424 	temp = *phy_regp;
   11425 	temp |= BM_WUC_ENABLE_BIT;
   11426 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11427 
   11428 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11429 		return rv;
   11430 
   11431 	/* Select Host Wakeup Registers page - caller now able to write
   11432 	 * registers on the Wakeup registers page
   11433 	 */
   11434 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11435 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11436 }
   11437 
   11438 /*
   11439  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11440  *  @dev: pointer to the HW structure
   11441  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11442  *
   11443  *  Restore BM_WUC_ENABLE_REG to its original value.
   11444  *
   11445  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11446  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11447  *  caller.
   11448  */
   11449 static int
   11450 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11451 {
   11452 #ifdef WM_DEBUG
   11453 	struct wm_softc *sc = device_private(dev);
   11454 #endif
   11455 
   11456 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11457 		device_xname(dev), __func__));
   11458 
   11459 	if (!phy_regp)
   11460 		return -1;
   11461 
   11462 	/* Select Port Control Registers page */
   11463 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11464 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11465 
   11466 	/* Restore 769.17 to its original value */
   11467 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11468 
   11469 	return 0;
   11470 }
   11471 
   11472 /*
   11473  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    11474  *  @dev: pointer to the HW structure
   11475  *  @offset: register offset to be read or written
   11476  *  @val: pointer to the data to read or write
   11477  *  @rd: determines if operation is read or write
   11478  *  @page_set: BM_WUC_PAGE already set and access enabled
   11479  *
   11480  *  Read the PHY register at offset and store the retrieved information in
   11481  *  data, or write data to PHY register at offset.  Note the procedure to
   11482  *  access the PHY wakeup registers is different than reading the other PHY
   11483  *  registers. It works as such:
   11484  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11485  *  2) Set page to 800 for host access (801 would be used for manageability)
   11486  *  3) Write the address using the address opcode (0x11)
   11487  *  4) Read or write the data using the data opcode (0x12)
   11488  *  5) Restore 769.17.2 to its original value
   11489  *
   11490  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11491  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11492  *
   11493  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11494  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    11495  *  is responsible for calling wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11496  */
   11497 static int
    11498 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11499 	bool page_set)
   11500 {
   11501 	struct wm_softc *sc = device_private(dev);
   11502 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11503 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11504 	uint16_t wuce;
   11505 	int rv = 0;
   11506 
   11507 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11508 		device_xname(dev), __func__));
   11509 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11510 	if ((sc->sc_type == WM_T_PCH)
   11511 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11512 		device_printf(dev,
   11513 		    "Attempting to access page %d while gig enabled.\n", page);
   11514 	}
   11515 
   11516 	if (!page_set) {
   11517 		/* Enable access to PHY wakeup registers */
   11518 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11519 		if (rv != 0) {
   11520 			device_printf(dev,
   11521 			    "%s: Could not enable PHY wakeup reg access\n",
   11522 			    __func__);
   11523 			return rv;
   11524 		}
   11525 	}
   11526 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11527 		device_xname(sc->sc_dev), __func__, page, regnum));
   11528 
   11529 	/*
    11530 	 * 3) and 4) Access the PHY wakeup register: write the address
    11531 	 * with opcode 0x11, then read or write the data with opcode 0x12.
   11532 	 */
   11533 
   11534 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11535 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11536 	if (rv != 0)
   11537 		return rv;
   11538 
   11539 	if (rd) {
   11540 		/* Read the Wakeup register page value using opcode 0x12 */
   11541 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11542 	} else {
   11543 		/* Write the Wakeup register page value using opcode 0x12 */
   11544 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11545 	}
   11546 	if (rv != 0)
   11547 		return rv;
   11548 
   11549 	if (!page_set)
   11550 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11551 
   11552 	return rv;
   11553 }
   11554 
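/*
 * A minimal usage sketch (not compiled; wm_read_two_wakeup_regs is a
 * hypothetical name): batching several wakeup-register reads under a
 * single enable/disable pair by passing page_set = true, per the
 * contract described above.  Assumes the PHY semaphore is held.
 */
#if 0
static int
wm_read_two_wakeup_regs(device_t dev, int off1, int off2,
    uint16_t *val1, uint16_t *val2)
{
	uint16_t wuce;
	int rv;

	/* Steps 1 and 2: save WUCE and select the wakeup page once. */
	if ((rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce)) != 0)
		return rv;

	/* Steps 3 and 4, twice, without re-selecting the page. */
	rv = wm_access_phy_wakeup_reg_bm(dev, off1, val1, true, true);
	if (rv == 0)
		rv = wm_access_phy_wakeup_reg_bm(dev, off2, val2, true, true);

	/* Step 5: restore WUCE. */
	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
	return rv;
}
#endif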
   11555 /*
   11556  * wm_gmii_hv_readreg:	[mii interface function]
   11557  *
    11558  *	Read a PHY register on an HV (PCH and newer) PHY.
   11559  * This could be handled by the PHY layer if we didn't have to lock the
   11560  * resource ...
   11561  */
   11562 static int
   11563 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11564 {
   11565 	struct wm_softc *sc = device_private(dev);
   11566 	int rv;
   11567 
   11568 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11569 		device_xname(dev), __func__));
   11570 	if (sc->phy.acquire(sc)) {
   11571 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11572 		return -1;
   11573 	}
   11574 
   11575 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11576 	sc->phy.release(sc);
   11577 	return rv;
   11578 }
   11579 
   11580 static int
   11581 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11582 {
   11583 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11584 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11585 	int rv;
   11586 
   11587 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11588 
   11589 	/* Page 800 works differently than the rest so it has its own func */
   11590 	if (page == BM_WUC_PAGE)
   11591 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11592 
   11593 	/*
   11594 	 * Lower than page 768 works differently than the rest so it has its
   11595 	 * own func
   11596 	 */
   11597 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11598 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11599 		return -1;
   11600 	}
   11601 
   11602 	/*
   11603 	 * XXX I21[789] documents say that the SMBus Address register is at
   11604 	 * PHY address 01, Page 0 (not 768), Register 26.
   11605 	 */
   11606 	if (page == HV_INTC_FC_PAGE_START)
   11607 		page = 0;
   11608 
   11609 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11610 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11611 		    page << BME1000_PAGE_SHIFT);
   11612 		if (rv != 0)
   11613 			return rv;
   11614 	}
   11615 
   11616 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11617 }
   11618 
   11619 /*
   11620  * wm_gmii_hv_writereg:	[mii interface function]
   11621  *
    11622  *	Write a PHY register on an HV (PCH and newer) PHY.
   11623  * This could be handled by the PHY layer if we didn't have to lock the
   11624  * resource ...
   11625  */
   11626 static int
   11627 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11628 {
   11629 	struct wm_softc *sc = device_private(dev);
   11630 	int rv;
   11631 
   11632 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11633 		device_xname(dev), __func__));
   11634 
   11635 	if (sc->phy.acquire(sc)) {
   11636 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11637 		return -1;
   11638 	}
   11639 
   11640 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11641 	sc->phy.release(sc);
   11642 
   11643 	return rv;
   11644 }
   11645 
   11646 static int
   11647 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11648 {
   11649 	struct wm_softc *sc = device_private(dev);
   11650 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11651 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11652 	int rv;
   11653 
   11654 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11655 
   11656 	/* Page 800 works differently than the rest so it has its own func */
   11657 	if (page == BM_WUC_PAGE)
   11658 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11659 		    false);
   11660 
   11661 	/*
   11662 	 * Lower than page 768 works differently than the rest so it has its
   11663 	 * own func
   11664 	 */
   11665 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11666 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11667 		return -1;
   11668 	}
   11669 
   11670 	{
   11671 		/*
   11672 		 * XXX I21[789] documents say that the SMBus Address register
   11673 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11674 		 */
   11675 		if (page == HV_INTC_FC_PAGE_START)
   11676 			page = 0;
   11677 
   11678 		/*
   11679 		 * XXX Workaround MDIO accesses being disabled after entering
   11680 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11681 		 * register is set)
   11682 		 */
   11683 		if (sc->sc_phytype == WMPHY_82578) {
   11684 			struct mii_softc *child;
   11685 
   11686 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11687 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11688 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11689 			    && ((val & (1 << 11)) != 0)) {
   11690 				device_printf(dev, "XXX need workaround\n");
   11691 			}
   11692 		}
   11693 
   11694 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11695 			rv = wm_gmii_mdic_writereg(dev, 1,
   11696 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11697 			if (rv != 0)
   11698 				return rv;
   11699 		}
   11700 	}
   11701 
   11702 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11703 }
   11704 
   11705 /*
   11706  * wm_gmii_82580_readreg:	[mii interface function]
   11707  *
   11708  *	Read a PHY register on the 82580 and I350.
   11709  * This could be handled by the PHY layer if we didn't have to lock the
   11710  * resource ...
   11711  */
   11712 static int
   11713 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11714 {
   11715 	struct wm_softc *sc = device_private(dev);
   11716 	int rv;
   11717 
   11718 	if (sc->phy.acquire(sc) != 0) {
   11719 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11720 		return -1;
   11721 	}
   11722 
   11723 #ifdef DIAGNOSTIC
   11724 	if (reg > MII_ADDRMASK) {
   11725 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11726 		    __func__, sc->sc_phytype, reg);
   11727 		reg &= MII_ADDRMASK;
   11728 	}
   11729 #endif
   11730 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11731 
   11732 	sc->phy.release(sc);
   11733 	return rv;
   11734 }
   11735 
   11736 /*
   11737  * wm_gmii_82580_writereg:	[mii interface function]
   11738  *
   11739  *	Write a PHY register on the 82580 and I350.
   11740  * This could be handled by the PHY layer if we didn't have to lock the
   11741  * resource ...
   11742  */
   11743 static int
   11744 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11745 {
   11746 	struct wm_softc *sc = device_private(dev);
   11747 	int rv;
   11748 
   11749 	if (sc->phy.acquire(sc) != 0) {
   11750 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11751 		return -1;
   11752 	}
   11753 
   11754 #ifdef DIAGNOSTIC
   11755 	if (reg > MII_ADDRMASK) {
   11756 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11757 		    __func__, sc->sc_phytype, reg);
   11758 		reg &= MII_ADDRMASK;
   11759 	}
   11760 #endif
   11761 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11762 
   11763 	sc->phy.release(sc);
   11764 	return rv;
   11765 }
   11766 
   11767 /*
   11768  * wm_gmii_gs40g_readreg:	[mii interface function]
   11769  *
    11770  *	Read a PHY register on the I210 and I211.
   11771  * This could be handled by the PHY layer if we didn't have to lock the
   11772  * resource ...
   11773  */
   11774 static int
   11775 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11776 {
   11777 	struct wm_softc *sc = device_private(dev);
   11778 	int page, offset;
   11779 	int rv;
   11780 
   11781 	/* Acquire semaphore */
   11782 	if (sc->phy.acquire(sc)) {
   11783 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11784 		return -1;
   11785 	}
   11786 
   11787 	/* Page select */
   11788 	page = reg >> GS40G_PAGE_SHIFT;
   11789 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11790 	if (rv != 0)
   11791 		goto release;
   11792 
   11793 	/* Read reg */
   11794 	offset = reg & GS40G_OFFSET_MASK;
   11795 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11796 
   11797 release:
   11798 	sc->phy.release(sc);
   11799 	return rv;
   11800 }
   11801 
   11802 /*
   11803  * wm_gmii_gs40g_writereg:	[mii interface function]
   11804  *
   11805  *	Write a PHY register on the I210 and I211.
   11806  * This could be handled by the PHY layer if we didn't have to lock the
   11807  * resource ...
   11808  */
   11809 static int
   11810 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11811 {
   11812 	struct wm_softc *sc = device_private(dev);
   11813 	uint16_t page;
   11814 	int offset, rv;
   11815 
   11816 	/* Acquire semaphore */
   11817 	if (sc->phy.acquire(sc)) {
   11818 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11819 		return -1;
   11820 	}
   11821 
   11822 	/* Page select */
   11823 	page = reg >> GS40G_PAGE_SHIFT;
   11824 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11825 	if (rv != 0)
   11826 		goto release;
   11827 
   11828 	/* Write reg */
   11829 	offset = reg & GS40G_OFFSET_MASK;
   11830 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11831 
   11832 release:
   11833 	/* Release semaphore */
   11834 	sc->phy.release(sc);
   11835 	return rv;
   11836 }
   11837 
   11838 /*
   11839  * wm_gmii_statchg:	[mii interface function]
   11840  *
   11841  *	Callback from MII layer when media changes.
   11842  */
   11843 static void
   11844 wm_gmii_statchg(struct ifnet *ifp)
   11845 {
   11846 	struct wm_softc *sc = ifp->if_softc;
   11847 	struct mii_data *mii = &sc->sc_mii;
   11848 
   11849 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11850 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11851 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11852 
   11853 	/* Get flow control negotiation result. */
   11854 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11855 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11856 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11857 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11858 	}
   11859 
   11860 	if (sc->sc_flowflags & IFM_FLOW) {
   11861 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11862 			sc->sc_ctrl |= CTRL_TFCE;
   11863 			sc->sc_fcrtl |= FCRTL_XONE;
   11864 		}
   11865 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11866 			sc->sc_ctrl |= CTRL_RFCE;
   11867 	}
   11868 
   11869 	if (mii->mii_media_active & IFM_FDX) {
   11870 		DPRINTF(sc, WM_DEBUG_LINK,
   11871 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11872 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11873 	} else {
   11874 		DPRINTF(sc, WM_DEBUG_LINK,
   11875 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11876 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11877 	}
   11878 
   11879 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11880 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11881 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11882 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11883 	if (sc->sc_type == WM_T_80003) {
   11884 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11885 		case IFM_1000_T:
   11886 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11887 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11888 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11889 			break;
   11890 		default:
   11891 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11892 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11893 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11894 			break;
   11895 		}
   11896 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11897 	}
   11898 }
   11899 
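/*
 * For reference, the flow-control mapping applied above is:
 *
 *	IFM_ETH_TXPAUSE -> CTRL_TFCE and FCRTL_XONE (we may send pause frames)
 *	IFM_ETH_RXPAUSE -> CTRL_RFCE (we honor pause frames from the peer)
 */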
   11900 /* kumeran related (80003, ICH* and PCH*) */
   11901 
   11902 /*
   11903  * wm_kmrn_readreg:
   11904  *
   11905  *	Read a kumeran register
   11906  */
   11907 static int
   11908 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11909 {
   11910 	int rv;
   11911 
   11912 	if (sc->sc_type == WM_T_80003)
   11913 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11914 	else
   11915 		rv = sc->phy.acquire(sc);
   11916 	if (rv != 0) {
   11917 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11918 		    __func__);
   11919 		return rv;
   11920 	}
   11921 
   11922 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11923 
   11924 	if (sc->sc_type == WM_T_80003)
   11925 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11926 	else
   11927 		sc->phy.release(sc);
   11928 
   11929 	return rv;
   11930 }
   11931 
   11932 static int
   11933 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11934 {
   11935 
   11936 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11937 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11938 	    KUMCTRLSTA_REN);
   11939 	CSR_WRITE_FLUSH(sc);
   11940 	delay(2);
   11941 
   11942 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11943 
   11944 	return 0;
   11945 }
   11946 
   11947 /*
   11948  * wm_kmrn_writereg:
   11949  *
   11950  *	Write a kumeran register
   11951  */
   11952 static int
   11953 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11954 {
   11955 	int rv;
   11956 
   11957 	if (sc->sc_type == WM_T_80003)
   11958 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11959 	else
   11960 		rv = sc->phy.acquire(sc);
   11961 	if (rv != 0) {
   11962 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11963 		    __func__);
   11964 		return rv;
   11965 	}
   11966 
   11967 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11968 
   11969 	if (sc->sc_type == WM_T_80003)
   11970 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11971 	else
   11972 		sc->phy.release(sc);
   11973 
   11974 	return rv;
   11975 }
   11976 
   11977 static int
   11978 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11979 {
   11980 
   11981 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11982 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11983 
   11984 	return 0;
   11985 }
   11986 
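/*
 * A minimal read-modify-write sketch built on the accessors above
 * (not compiled; wm_kmrn_setbits is a hypothetical name -- callers in
 * this driver, e.g. wm_gmii_statchg(), only do plain writes):
 */
#if 0
static int
wm_kmrn_setbits(struct wm_softc *sc, int reg, uint16_t bits)
{
	uint16_t val;
	int rv;

	if ((rv = wm_kmrn_readreg(sc, reg, &val)) != 0)
		return rv;
	return wm_kmrn_writereg(sc, reg, val | bits);
}
#endif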
   11987 /*
   11988  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11989  * This access method is different from IEEE MMD.
   11990  */
   11991 static int
   11992 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11993 {
   11994 	struct wm_softc *sc = device_private(dev);
   11995 	int rv;
   11996 
   11997 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11998 	if (rv != 0)
   11999 		return rv;
   12000 
   12001 	if (rd)
   12002 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12003 	else
   12004 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12005 	return rv;
   12006 }
   12007 
   12008 static int
   12009 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12010 {
   12011 
   12012 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12013 }
   12014 
   12015 static int
   12016 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12017 {
   12018 
   12019 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12020 }
   12021 
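/*
 * A minimal usage sketch (not compiled; hypothetical caller): the EMI
 * accessors above are "_locked", so a caller takes the PHY semaphore
 * around them, as done elsewhere in this file.
 */
#if 0
static int
wm_read_one_emi_reg(struct wm_softc *sc, int emi_reg, uint16_t *valp)
{
	int rv;

	if (sc->phy.acquire(sc) != 0)
		return -1;
	rv = wm_read_emi_reg_locked(sc->sc_dev, emi_reg, valp);
	sc->phy.release(sc);

	return rv;
}
#endif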
   12022 /* SGMII related */
   12023 
   12024 /*
   12025  * wm_sgmii_uses_mdio
   12026  *
   12027  * Check whether the transaction is to the internal PHY or the external
   12028  * MDIO interface. Return true if it's MDIO.
   12029  */
   12030 static bool
   12031 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12032 {
   12033 	uint32_t reg;
   12034 	bool ismdio = false;
   12035 
   12036 	switch (sc->sc_type) {
   12037 	case WM_T_82575:
   12038 	case WM_T_82576:
   12039 		reg = CSR_READ(sc, WMREG_MDIC);
   12040 		ismdio = ((reg & MDIC_DEST) != 0);
   12041 		break;
   12042 	case WM_T_82580:
   12043 	case WM_T_I350:
   12044 	case WM_T_I354:
   12045 	case WM_T_I210:
   12046 	case WM_T_I211:
   12047 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12048 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12049 		break;
   12050 	default:
   12051 		break;
   12052 	}
   12053 
   12054 	return ismdio;
   12055 }
   12056 
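/*
 * A minimal dispatch sketch (not compiled; wm_sgmii_dispatch_readreg is
 * a hypothetical name -- the driver makes a similar choice once, when
 * it sets up its MII accessors): wm_sgmii_uses_mdio() decides between
 * the MDIC path and the I2CCMD path defined below.
 */
#if 0
static int
wm_sgmii_dispatch_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct wm_softc *sc = device_private(dev);

	if (wm_sgmii_uses_mdio(sc))
		return wm_gmii_mdic_readreg(dev, phy, reg, val);

	return wm_sgmii_readreg(dev, phy, reg, val);
}
#endif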
   12057 /* Setup internal SGMII PHY for SFP */
   12058 static void
   12059 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12060 {
   12061 	uint16_t id1, id2, phyreg;
   12062 	int i, rv;
   12063 
   12064 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12065 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12066 		return;
   12067 
   12068 	for (i = 0; i < MII_NPHY; i++) {
   12069 		sc->phy.no_errprint = true;
   12070 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12071 		if (rv != 0)
   12072 			continue;
   12073 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12074 		if (rv != 0)
   12075 			continue;
   12076 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12077 			continue;
   12078 		sc->phy.no_errprint = false;
   12079 
   12080 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12081 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12082 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12083 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12084 		break;
   12085 	}
    12087 }
   12088 
   12089 /*
   12090  * wm_sgmii_readreg:	[mii interface function]
   12091  *
   12092  *	Read a PHY register on the SGMII
   12093  * This could be handled by the PHY layer if we didn't have to lock the
   12094  * resource ...
   12095  */
   12096 static int
   12097 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12098 {
   12099 	struct wm_softc *sc = device_private(dev);
   12100 	int rv;
   12101 
   12102 	if (sc->phy.acquire(sc)) {
   12103 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12104 		return -1;
   12105 	}
   12106 
   12107 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12108 
   12109 	sc->phy.release(sc);
   12110 	return rv;
   12111 }
   12112 
   12113 static int
   12114 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12115 {
   12116 	struct wm_softc *sc = device_private(dev);
   12117 	uint32_t i2ccmd;
   12118 	int i, rv = 0;
   12119 
   12120 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12121 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12122 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12123 
   12124 	/* Poll the ready bit */
   12125 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12126 		delay(50);
   12127 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12128 		if (i2ccmd & I2CCMD_READY)
   12129 			break;
   12130 	}
   12131 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12132 		device_printf(dev, "I2CCMD Read did not complete\n");
   12133 		rv = ETIMEDOUT;
   12134 	}
   12135 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12136 		if (!sc->phy.no_errprint)
   12137 			device_printf(dev, "I2CCMD Error bit set\n");
   12138 		rv = EIO;
   12139 	}
   12140 
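	/* Swap the data bytes back from I2C (MSB-first) order to host order. */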
   12141 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12142 
   12143 	return rv;
   12144 }
   12145 
   12146 /*
   12147  * wm_sgmii_writereg:	[mii interface function]
   12148  *
   12149  *	Write a PHY register on the SGMII.
   12150  * This could be handled by the PHY layer if we didn't have to lock the
   12151  * resource ...
   12152  */
   12153 static int
   12154 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12155 {
   12156 	struct wm_softc *sc = device_private(dev);
   12157 	int rv;
   12158 
   12159 	if (sc->phy.acquire(sc) != 0) {
   12160 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12161 		return -1;
   12162 	}
   12163 
   12164 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12165 
   12166 	sc->phy.release(sc);
   12167 
   12168 	return rv;
   12169 }
   12170 
   12171 static int
   12172 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12173 {
   12174 	struct wm_softc *sc = device_private(dev);
   12175 	uint32_t i2ccmd;
   12176 	uint16_t swapdata;
   12177 	int rv = 0;
   12178 	int i;
   12179 
   12180 	/* Swap the data bytes for the I2C interface */
   12181 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12182 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12183 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12184 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12185 
   12186 	/* Poll the ready bit */
   12187 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12188 		delay(50);
   12189 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12190 		if (i2ccmd & I2CCMD_READY)
   12191 			break;
   12192 	}
   12193 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12194 		device_printf(dev, "I2CCMD Write did not complete\n");
   12195 		rv = ETIMEDOUT;
   12196 	}
   12197 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12198 		device_printf(dev, "I2CCMD Error bit set\n");
   12199 		rv = EIO;
   12200 	}
   12201 
   12202 	return rv;
   12203 }
   12204 
   12205 /* TBI related */
   12206 
   12207 static bool
   12208 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12209 {
   12210 	bool sig;
   12211 
   12212 	sig = ctrl & CTRL_SWDPIN(1);
   12213 
   12214 	/*
   12215 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12216 	 * detect a signal, 1 if they don't.
   12217 	 */
   12218 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12219 		sig = !sig;
   12220 
   12221 	return sig;
   12222 }
   12223 
   12224 /*
   12225  * wm_tbi_mediainit:
   12226  *
   12227  *	Initialize media for use on 1000BASE-X devices.
   12228  */
   12229 static void
   12230 wm_tbi_mediainit(struct wm_softc *sc)
   12231 {
   12232 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12233 	const char *sep = "";
   12234 
   12235 	if (sc->sc_type < WM_T_82543)
   12236 		sc->sc_tipg = TIPG_WM_DFLT;
   12237 	else
   12238 		sc->sc_tipg = TIPG_LG_DFLT;
   12239 
   12240 	sc->sc_tbi_serdes_anegticks = 5;
   12241 
   12242 	/* Initialize our media structures */
   12243 	sc->sc_mii.mii_ifp = ifp;
   12244 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12245 
   12246 	ifp->if_baudrate = IF_Gbps(1);
   12247 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12248 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12249 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12250 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12251 		    sc->sc_core_lock);
   12252 	} else {
   12253 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12254 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12255 	}
   12256 
   12257 	/*
   12258 	 * SWD Pins:
   12259 	 *
   12260 	 *	0 = Link LED (output)
   12261 	 *	1 = Loss Of Signal (input)
   12262 	 */
   12263 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12264 
   12265 	/* XXX Perhaps this is only for TBI */
   12266 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12267 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12268 
   12269 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12270 		sc->sc_ctrl &= ~CTRL_LRST;
   12271 
   12272 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12273 
   12274 #define	ADD(ss, mm, dd)							\
   12275 do {									\
   12276 	aprint_normal("%s%s", sep, ss);					\
   12277 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12278 	sep = ", ";							\
   12279 } while (/*CONSTCOND*/0)
   12280 
   12281 	aprint_normal_dev(sc->sc_dev, "");
   12282 
   12283 	if (sc->sc_type == WM_T_I354) {
   12284 		uint32_t status;
   12285 
   12286 		status = CSR_READ(sc, WMREG_STATUS);
   12287 		if (((status & STATUS_2P5_SKU) != 0)
   12288 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12289 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12290 		} else
   12291 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12292 	} else if (sc->sc_type == WM_T_82545) {
   12293 		/* Only 82545 is LX (XXX except SFP) */
   12294 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12295 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12296 	} else if (sc->sc_sfptype != 0) {
   12297 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12298 		switch (sc->sc_sfptype) {
   12299 		default:
   12300 		case SFF_SFP_ETH_FLAGS_1000SX:
   12301 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12302 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12303 			break;
   12304 		case SFF_SFP_ETH_FLAGS_1000LX:
   12305 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12306 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12307 			break;
   12308 		case SFF_SFP_ETH_FLAGS_1000CX:
   12309 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12310 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12311 			break;
   12312 		case SFF_SFP_ETH_FLAGS_1000T:
   12313 			ADD("1000baseT", IFM_1000_T, 0);
   12314 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12315 			break;
   12316 		case SFF_SFP_ETH_FLAGS_100FX:
   12317 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12318 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12319 			break;
   12320 		}
   12321 	} else {
   12322 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12323 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12324 	}
   12325 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12326 	aprint_normal("\n");
   12327 
   12328 #undef ADD
   12329 
   12330 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12331 }
   12332 
   12333 /*
   12334  * wm_tbi_mediachange:	[ifmedia interface function]
   12335  *
   12336  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12337  */
   12338 static int
   12339 wm_tbi_mediachange(struct ifnet *ifp)
   12340 {
   12341 	struct wm_softc *sc = ifp->if_softc;
   12342 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12343 	uint32_t status, ctrl;
   12344 	bool signal;
   12345 	int i;
   12346 
   12347 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12348 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12349 		/* XXX need some work for >= 82571 and < 82575 */
   12350 		if (sc->sc_type < WM_T_82575)
   12351 			return 0;
   12352 	}
   12353 
   12354 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12355 	    || (sc->sc_type >= WM_T_82575))
   12356 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12357 
   12358 	sc->sc_ctrl &= ~CTRL_LRST;
   12359 	sc->sc_txcw = TXCW_ANE;
   12360 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12361 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12362 	else if (ife->ifm_media & IFM_FDX)
   12363 		sc->sc_txcw |= TXCW_FD;
   12364 	else
   12365 		sc->sc_txcw |= TXCW_HD;
   12366 
   12367 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12368 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12369 
    12370 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12371 		device_xname(sc->sc_dev), sc->sc_txcw));
   12372 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12373 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12374 	CSR_WRITE_FLUSH(sc);
   12375 	delay(1000);
   12376 
   12377 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12378 	signal = wm_tbi_havesignal(sc, ctrl);
   12379 
   12380 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12381 		signal));
   12382 
   12383 	if (signal) {
   12384 		/* Have signal; wait for the link to come up. */
   12385 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12386 			delay(10000);
   12387 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12388 				break;
   12389 		}
   12390 
    12391 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   12392 			device_xname(sc->sc_dev), i));
   12393 
   12394 		status = CSR_READ(sc, WMREG_STATUS);
   12395 		DPRINTF(sc, WM_DEBUG_LINK,
   12396 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12397 			device_xname(sc->sc_dev), status, STATUS_LU));
   12398 		if (status & STATUS_LU) {
   12399 			/* Link is up. */
   12400 			DPRINTF(sc, WM_DEBUG_LINK,
   12401 			    ("%s: LINK: set media -> link up %s\n",
   12402 				device_xname(sc->sc_dev),
   12403 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12404 
   12405 			/*
   12406 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12407 			 * so we should update sc->sc_ctrl
   12408 			 */
   12409 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12410 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12411 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12412 			if (status & STATUS_FD)
   12413 				sc->sc_tctl |=
   12414 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12415 			else
   12416 				sc->sc_tctl |=
   12417 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12418 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12419 				sc->sc_fcrtl |= FCRTL_XONE;
   12420 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12421 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12422 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12423 			sc->sc_tbi_linkup = 1;
   12424 		} else {
   12425 			if (i == WM_LINKUP_TIMEOUT)
   12426 				wm_check_for_link(sc);
   12427 			/* Link is down. */
   12428 			DPRINTF(sc, WM_DEBUG_LINK,
   12429 			    ("%s: LINK: set media -> link down\n",
   12430 				device_xname(sc->sc_dev)));
   12431 			sc->sc_tbi_linkup = 0;
   12432 		}
   12433 	} else {
   12434 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12435 			device_xname(sc->sc_dev)));
   12436 		sc->sc_tbi_linkup = 0;
   12437 	}
   12438 
   12439 	wm_tbi_serdes_set_linkled(sc);
   12440 
   12441 	return 0;
   12442 }
   12443 
   12444 /*
   12445  * wm_tbi_mediastatus:	[ifmedia interface function]
   12446  *
   12447  *	Get the current interface media status on a 1000BASE-X device.
   12448  */
   12449 static void
   12450 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12451 {
   12452 	struct wm_softc *sc = ifp->if_softc;
   12453 	uint32_t ctrl, status;
   12454 
   12455 	ifmr->ifm_status = IFM_AVALID;
   12456 	ifmr->ifm_active = IFM_ETHER;
   12457 
   12458 	status = CSR_READ(sc, WMREG_STATUS);
   12459 	if ((status & STATUS_LU) == 0) {
   12460 		ifmr->ifm_active |= IFM_NONE;
   12461 		return;
   12462 	}
   12463 
   12464 	ifmr->ifm_status |= IFM_ACTIVE;
   12465 	/* Only 82545 is LX */
   12466 	if (sc->sc_type == WM_T_82545)
   12467 		ifmr->ifm_active |= IFM_1000_LX;
   12468 	else
   12469 		ifmr->ifm_active |= IFM_1000_SX;
   12470 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12471 		ifmr->ifm_active |= IFM_FDX;
   12472 	else
   12473 		ifmr->ifm_active |= IFM_HDX;
   12474 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12475 	if (ctrl & CTRL_RFCE)
   12476 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12477 	if (ctrl & CTRL_TFCE)
   12478 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12479 }
   12480 
   12481 /* XXX TBI only */
   12482 static int
   12483 wm_check_for_link(struct wm_softc *sc)
   12484 {
   12485 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12486 	uint32_t rxcw;
   12487 	uint32_t ctrl;
   12488 	uint32_t status;
   12489 	bool signal;
   12490 
   12491 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12492 		device_xname(sc->sc_dev), __func__));
   12493 
   12494 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12495 		/* XXX need some work for >= 82571 */
   12496 		if (sc->sc_type >= WM_T_82571) {
   12497 			sc->sc_tbi_linkup = 1;
   12498 			return 0;
   12499 		}
   12500 	}
   12501 
   12502 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12503 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12504 	status = CSR_READ(sc, WMREG_STATUS);
   12505 	signal = wm_tbi_havesignal(sc, ctrl);
   12506 
   12507 	DPRINTF(sc, WM_DEBUG_LINK,
   12508 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12509 		device_xname(sc->sc_dev), __func__, signal,
   12510 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12511 
   12512 	/*
   12513 	 * SWDPIN   LU RXCW
   12514 	 *	0    0	  0
   12515 	 *	0    0	  1	(should not happen)
   12516 	 *	0    1	  0	(should not happen)
   12517 	 *	0    1	  1	(should not happen)
   12518 	 *	1    0	  0	Disable autonego and force linkup
   12519 	 *	1    0	  1	got /C/ but not linkup yet
   12520 	 *	1    1	  0	(linkup)
   12521 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12522 	 *
   12523 	 */
   12524 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12525 		DPRINTF(sc, WM_DEBUG_LINK,
   12526 		    ("%s: %s: force linkup and fullduplex\n",
   12527 			device_xname(sc->sc_dev), __func__));
   12528 		sc->sc_tbi_linkup = 0;
   12529 		/* Disable auto-negotiation in the TXCW register */
   12530 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12531 
   12532 		/*
   12533 		 * Force link-up and also force full-duplex.
   12534 		 *
    12535 		 * NOTE: CTRL will update TFCE and RFCE automatically,
   12536 		 * so we should update sc->sc_ctrl
   12537 		 */
   12538 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12539 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12540 	} else if (((status & STATUS_LU) != 0)
   12541 	    && ((rxcw & RXCW_C) != 0)
   12542 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12543 		sc->sc_tbi_linkup = 1;
   12544 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12545 			device_xname(sc->sc_dev),
   12546 			__func__));
   12547 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12548 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12549 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12550 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   12551 			device_xname(sc->sc_dev), __func__));
   12552 	} else {
   12553 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12554 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12555 			status));
   12556 	}
   12557 
   12558 	return 0;
   12559 }
   12560 
   12561 /*
   12562  * wm_tbi_tick:
   12563  *
   12564  *	Check the link on TBI devices.
   12565  *	This function acts as mii_tick().
   12566  */
   12567 static void
   12568 wm_tbi_tick(struct wm_softc *sc)
   12569 {
   12570 	struct mii_data *mii = &sc->sc_mii;
   12571 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12572 	uint32_t status;
   12573 
   12574 	KASSERT(WM_CORE_LOCKED(sc));
   12575 
   12576 	status = CSR_READ(sc, WMREG_STATUS);
   12577 
   12578 	/* XXX is this needed? */
   12579 	(void)CSR_READ(sc, WMREG_RXCW);
   12580 	(void)CSR_READ(sc, WMREG_CTRL);
   12581 
   12582 	/* set link status */
   12583 	if ((status & STATUS_LU) == 0) {
   12584 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12585 			device_xname(sc->sc_dev)));
   12586 		sc->sc_tbi_linkup = 0;
   12587 	} else if (sc->sc_tbi_linkup == 0) {
   12588 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12589 			device_xname(sc->sc_dev),
   12590 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12591 		sc->sc_tbi_linkup = 1;
   12592 		sc->sc_tbi_serdes_ticks = 0;
   12593 	}
   12594 
   12595 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12596 		goto setled;
   12597 
   12598 	if ((status & STATUS_LU) == 0) {
   12599 		sc->sc_tbi_linkup = 0;
   12600 		/* If the timer expired, retry autonegotiation */
   12601 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12602 		    && (++sc->sc_tbi_serdes_ticks
   12603 			>= sc->sc_tbi_serdes_anegticks)) {
   12604 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12605 				device_xname(sc->sc_dev), __func__));
   12606 			sc->sc_tbi_serdes_ticks = 0;
   12607 			/*
   12608 			 * Reset the link, and let autonegotiation do
   12609 			 * its thing
   12610 			 */
   12611 			sc->sc_ctrl |= CTRL_LRST;
   12612 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12613 			CSR_WRITE_FLUSH(sc);
   12614 			delay(1000);
   12615 			sc->sc_ctrl &= ~CTRL_LRST;
   12616 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12617 			CSR_WRITE_FLUSH(sc);
   12618 			delay(1000);
   12619 			CSR_WRITE(sc, WMREG_TXCW,
   12620 			    sc->sc_txcw & ~TXCW_ANE);
   12621 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12622 		}
   12623 	}
   12624 
   12625 setled:
   12626 	wm_tbi_serdes_set_linkled(sc);
   12627 }
   12628 
   12629 /* SERDES related */
   12630 static void
   12631 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12632 {
   12633 	uint32_t reg;
   12634 
   12635 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12636 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12637 		return;
   12638 
   12639 	/* Enable PCS to turn on link */
   12640 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12641 	reg |= PCS_CFG_PCS_EN;
   12642 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12643 
   12644 	/* Power up the laser */
   12645 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12646 	reg &= ~CTRL_EXT_SWDPIN(3);
   12647 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12648 
   12649 	/* Flush the write to verify completion */
   12650 	CSR_WRITE_FLUSH(sc);
   12651 	delay(1000);
   12652 }
   12653 
   12654 static int
   12655 wm_serdes_mediachange(struct ifnet *ifp)
   12656 {
   12657 	struct wm_softc *sc = ifp->if_softc;
   12658 	bool pcs_autoneg = true; /* XXX */
   12659 	uint32_t ctrl_ext, pcs_lctl, reg;
   12660 
   12661 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12662 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12663 		return 0;
   12664 
   12665 	/* XXX Currently, this function is not called on 8257[12] */
   12666 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12667 	    || (sc->sc_type >= WM_T_82575))
   12668 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12669 
   12670 	/* Power on the sfp cage if present */
   12671 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12672 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12673 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12674 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12675 
   12676 	sc->sc_ctrl |= CTRL_SLU;
   12677 
   12678 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12679 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12680 
   12681 		reg = CSR_READ(sc, WMREG_CONNSW);
   12682 		reg |= CONNSW_ENRGSRC;
   12683 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12684 	}
   12685 
   12686 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12687 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12688 	case CTRL_EXT_LINK_MODE_SGMII:
   12689 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12690 		pcs_autoneg = true;
   12691 		/* Autoneg time out should be disabled for SGMII mode */
   12692 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12693 		break;
   12694 	case CTRL_EXT_LINK_MODE_1000KX:
   12695 		pcs_autoneg = false;
   12696 		/* FALLTHROUGH */
   12697 	default:
   12698 		if ((sc->sc_type == WM_T_82575)
   12699 		    || (sc->sc_type == WM_T_82576)) {
   12700 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12701 				pcs_autoneg = false;
   12702 		}
   12703 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12704 		    | CTRL_FRCFDX;
   12705 
   12706 		/* Set speed of 1000/Full if speed/duplex is forced */
   12707 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12708 	}
   12709 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12710 
   12711 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12712 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12713 
   12714 	if (pcs_autoneg) {
   12715 		/* Set PCS register for autoneg */
   12716 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12717 
   12718 		/* Disable force flow control for autoneg */
   12719 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12720 
   12721 		/* Configure flow control advertisement for autoneg */
   12722 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12723 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12724 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12725 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12726 	} else
   12727 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12728 
   12729 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12730 
   12731 	return 0;
   12732 }
   12733 
   12734 static void
   12735 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12736 {
   12737 	struct wm_softc *sc = ifp->if_softc;
   12738 	struct mii_data *mii = &sc->sc_mii;
   12739 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12740 	uint32_t pcs_adv, pcs_lpab, reg;
   12741 
   12742 	ifmr->ifm_status = IFM_AVALID;
   12743 	ifmr->ifm_active = IFM_ETHER;
   12744 
   12745 	/* Check PCS */
   12746 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12747 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12748 		ifmr->ifm_active |= IFM_NONE;
   12749 		sc->sc_tbi_linkup = 0;
   12750 		goto setled;
   12751 	}
   12752 
   12753 	sc->sc_tbi_linkup = 1;
   12754 	ifmr->ifm_status |= IFM_ACTIVE;
   12755 	if (sc->sc_type == WM_T_I354) {
   12756 		uint32_t status;
   12757 
   12758 		status = CSR_READ(sc, WMREG_STATUS);
   12759 		if (((status & STATUS_2P5_SKU) != 0)
   12760 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12761 			ifmr->ifm_active |= IFM_2500_KX;
   12762 		} else
   12763 			ifmr->ifm_active |= IFM_1000_KX;
   12764 	} else {
   12765 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12766 		case PCS_LSTS_SPEED_10:
   12767 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12768 			break;
   12769 		case PCS_LSTS_SPEED_100:
   12770 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12771 			break;
   12772 		case PCS_LSTS_SPEED_1000:
   12773 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12774 			break;
   12775 		default:
   12776 			device_printf(sc->sc_dev, "Unknown speed\n");
   12777 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12778 			break;
   12779 		}
   12780 	}
   12781 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12782 	if ((reg & PCS_LSTS_FDX) != 0)
   12783 		ifmr->ifm_active |= IFM_FDX;
   12784 	else
   12785 		ifmr->ifm_active |= IFM_HDX;
   12786 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12787 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12788 		/* Check flow */
   12789 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12790 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12791 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12792 			goto setled;
   12793 		}
   12794 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12795 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12796 		DPRINTF(sc, WM_DEBUG_LINK,
   12797 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12798 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12799 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12800 			mii->mii_media_active |= IFM_FLOW
   12801 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12802 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12803 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12804 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12805 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12806 			mii->mii_media_active |= IFM_FLOW
   12807 			    | IFM_ETH_TXPAUSE;
   12808 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12809 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12810 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12811 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12812 			mii->mii_media_active |= IFM_FLOW
   12813 			    | IFM_ETH_RXPAUSE;
   12814 		}
   12815 	}
   12816 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12817 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12818 setled:
   12819 	wm_tbi_serdes_set_linkled(sc);
   12820 }
   12821 
   12822 /*
   12823  * wm_serdes_tick:
   12824  *
   12825  *	Check the link on serdes devices.
   12826  */
   12827 static void
   12828 wm_serdes_tick(struct wm_softc *sc)
   12829 {
   12830 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12831 	struct mii_data *mii = &sc->sc_mii;
   12832 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12833 	uint32_t reg;
   12834 
   12835 	KASSERT(WM_CORE_LOCKED(sc));
   12836 
   12837 	mii->mii_media_status = IFM_AVALID;
   12838 	mii->mii_media_active = IFM_ETHER;
   12839 
   12840 	/* Check PCS */
   12841 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12842 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12843 		mii->mii_media_status |= IFM_ACTIVE;
   12844 		sc->sc_tbi_linkup = 1;
   12845 		sc->sc_tbi_serdes_ticks = 0;
   12846 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12847 		if ((reg & PCS_LSTS_FDX) != 0)
   12848 			mii->mii_media_active |= IFM_FDX;
   12849 		else
   12850 			mii->mii_media_active |= IFM_HDX;
   12851 	} else {
   12852 		mii->mii_media_status |= IFM_NONE;
   12853 		sc->sc_tbi_linkup = 0;
   12854 		/* If the timer expired, retry autonegotiation */
   12855 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12856 		    && (++sc->sc_tbi_serdes_ticks
   12857 			>= sc->sc_tbi_serdes_anegticks)) {
   12858 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12859 				device_xname(sc->sc_dev), __func__));
   12860 			sc->sc_tbi_serdes_ticks = 0;
   12861 			/* XXX */
   12862 			wm_serdes_mediachange(ifp);
   12863 		}
   12864 	}
   12865 
   12866 	wm_tbi_serdes_set_linkled(sc);
   12867 }
   12868 
   12869 /* SFP related */
   12870 
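/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte of the SFP module's ID EEPROM through the I2CCMD
 *	register, polling for the ready bit and checking for an error.
 */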
   12871 static int
   12872 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12873 {
   12874 	uint32_t i2ccmd;
   12875 	int i;
   12876 
   12877 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12878 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12879 
   12880 	/* Poll the ready bit */
   12881 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12882 		delay(50);
   12883 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12884 		if (i2ccmd & I2CCMD_READY)
   12885 			break;
   12886 	}
   12887 	if ((i2ccmd & I2CCMD_READY) == 0)
   12888 		return -1;
   12889 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12890 		return -1;
   12891 
   12892 	*data = i2ccmd & 0x00ff;
   12893 
   12894 	return 0;
   12895 }
   12896 
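/*
 * wm_sfp_get_media_type:
 *
 *	Identify the SFP module from its ID byte and map its ethernet
 *	compliance flags to a media type: 1000BASE-SX/LX modules are
 *	treated as SERDES, 1000BASE-T as SGMII copper, and 100BASE-FX
 *	as SGMII with SERDES media.
 */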
   12897 static uint32_t
   12898 wm_sfp_get_media_type(struct wm_softc *sc)
   12899 {
   12900 	uint32_t ctrl_ext;
   12901 	uint8_t val = 0;
   12902 	int timeout = 3;
   12903 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12904 	int rv = -1;
   12905 
   12906 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12907 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12908 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12909 	CSR_WRITE_FLUSH(sc);
   12910 
   12911 	/* Read SFP module data */
   12912 	while (timeout) {
   12913 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12914 		if (rv == 0)
   12915 			break;
   12916 		delay(100*1000); /* XXX too big */
   12917 		timeout--;
   12918 	}
   12919 	if (rv != 0)
   12920 		goto out;
   12921 
   12922 	switch (val) {
   12923 	case SFF_SFP_ID_SFF:
   12924 		aprint_normal_dev(sc->sc_dev,
   12925 		    "Module/Connector soldered to board\n");
   12926 		break;
   12927 	case SFF_SFP_ID_SFP:
   12928 		sc->sc_flags |= WM_F_SFP;
   12929 		break;
   12930 	case SFF_SFP_ID_UNKNOWN:
   12931 		goto out;
   12932 	default:
   12933 		break;
   12934 	}
   12935 
   12936 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12937 	if (rv != 0)
   12938 		goto out;
   12939 
   12940 	sc->sc_sfptype = val;
   12941 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12942 		mediatype = WM_MEDIATYPE_SERDES;
   12943 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12944 		sc->sc_flags |= WM_F_SGMII;
   12945 		mediatype = WM_MEDIATYPE_COPPER;
   12946 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12947 		sc->sc_flags |= WM_F_SGMII;
   12948 		mediatype = WM_MEDIATYPE_SERDES;
   12949 	} else {
   12950 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12951 		    __func__, sc->sc_sfptype);
   12952 		sc->sc_sfptype = 0; /* XXX unknown */
   12953 	}
   12954 
   12955 out:
   12956 	/* Restore I2C interface setting */
   12957 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12958 
   12959 	return mediatype;
   12960 }
   12961 
   12962 /*
   12963  * NVM related.
   12964  * Microwire, SPI (w/wo EERD) and Flash.
   12965  */
   12966 
   12967 /* Both spi and uwire */
   12968 
   12969 /*
   12970  * wm_eeprom_sendbits:
   12971  *
   12972  *	Send a series of bits to the EEPROM.
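 *	Bits go out MSB first on DI; the EEPROM latches DI on the
 *	rising edge of SK.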
   12973  */
   12974 static void
   12975 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12976 {
   12977 	uint32_t reg;
   12978 	int x;
   12979 
   12980 	reg = CSR_READ(sc, WMREG_EECD);
   12981 
   12982 	for (x = nbits; x > 0; x--) {
   12983 		if (bits & (1U << (x - 1)))
   12984 			reg |= EECD_DI;
   12985 		else
   12986 			reg &= ~EECD_DI;
   12987 		CSR_WRITE(sc, WMREG_EECD, reg);
   12988 		CSR_WRITE_FLUSH(sc);
   12989 		delay(2);
   12990 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12991 		CSR_WRITE_FLUSH(sc);
   12992 		delay(2);
   12993 		CSR_WRITE(sc, WMREG_EECD, reg);
   12994 		CSR_WRITE_FLUSH(sc);
   12995 		delay(2);
   12996 	}
   12997 }
   12998 
   12999 /*
   13000  * wm_eeprom_recvbits:
   13001  *
   13002  *	Receive a series of bits from the EEPROM.
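 *	Bits come in MSB first on DO and are sampled while SK is high.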
   13003  */
   13004 static void
   13005 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13006 {
   13007 	uint32_t reg, val;
   13008 	int x;
   13009 
   13010 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13011 
   13012 	val = 0;
   13013 	for (x = nbits; x > 0; x--) {
   13014 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13015 		CSR_WRITE_FLUSH(sc);
   13016 		delay(2);
   13017 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13018 			val |= (1U << (x - 1));
   13019 		CSR_WRITE(sc, WMREG_EECD, reg);
   13020 		CSR_WRITE_FLUSH(sc);
   13021 		delay(2);
   13022 	}
   13023 	*valp = val;
   13024 }
   13025 
   13026 /* Microwire */
   13027 
   13028 /*
   13029  * wm_nvm_read_uwire:
   13030  *
   13031  *	Read a word from the EEPROM using the MicroWire protocol.
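 *
 *	Each word is a separate transaction: raise CS, clock out the
 *	3 bit READ opcode and the word address, clock in 16 data bits,
 *	then drop CS.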
   13032  */
   13033 static int
   13034 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13035 {
   13036 	uint32_t reg, val;
   13037 	int i;
   13038 
   13039 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13040 		device_xname(sc->sc_dev), __func__));
   13041 
   13042 	if (sc->nvm.acquire(sc) != 0)
   13043 		return -1;
   13044 
   13045 	for (i = 0; i < wordcnt; i++) {
   13046 		/* Clear SK and DI. */
   13047 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13048 		CSR_WRITE(sc, WMREG_EECD, reg);
   13049 
   13050 		/*
   13051 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13052 		 * and Xen.
   13053 		 *
   13054 		 * We use this workaround only for 82540 because qemu's
   13055 		 * e1000 acts as an 82540.
   13056 		 */
   13057 		if (sc->sc_type == WM_T_82540) {
   13058 			reg |= EECD_SK;
   13059 			CSR_WRITE(sc, WMREG_EECD, reg);
   13060 			reg &= ~EECD_SK;
   13061 			CSR_WRITE(sc, WMREG_EECD, reg);
   13062 			CSR_WRITE_FLUSH(sc);
   13063 			delay(2);
   13064 		}
   13065 		/* XXX: end of workaround */
   13066 
   13067 		/* Set CHIP SELECT. */
   13068 		reg |= EECD_CS;
   13069 		CSR_WRITE(sc, WMREG_EECD, reg);
   13070 		CSR_WRITE_FLUSH(sc);
   13071 		delay(2);
   13072 
   13073 		/* Shift in the READ command. */
   13074 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13075 
   13076 		/* Shift in address. */
   13077 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13078 
   13079 		/* Shift out the data. */
   13080 		wm_eeprom_recvbits(sc, &val, 16);
   13081 		data[i] = val & 0xffff;
   13082 
   13083 		/* Clear CHIP SELECT. */
   13084 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13085 		CSR_WRITE(sc, WMREG_EECD, reg);
   13086 		CSR_WRITE_FLUSH(sc);
   13087 		delay(2);
   13088 	}
   13089 
   13090 	sc->nvm.release(sc);
   13091 	return 0;
   13092 }
   13093 
   13094 /* SPI */
   13095 
   13096 /*
   13097  * Set SPI and FLASH related information from the EECD register.
   13098  * For 82541 and 82547, the word size is taken from EEPROM.
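 *
 * The EECD size field is log2 of the word count, biased by
 * NVM_WORD_SIZE_BASE_SHIFT (6).  Example: a field value of 2 on an
 * 82571 gives size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256
 * words.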
   13099  */
   13100 static int
   13101 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13102 {
   13103 	int size;
   13104 	uint32_t reg;
   13105 	uint16_t data;
   13106 
   13107 	reg = CSR_READ(sc, WMREG_EECD);
   13108 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13109 
   13110 	/* Read the size of NVM from EECD by default */
   13111 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13112 	switch (sc->sc_type) {
   13113 	case WM_T_82541:
   13114 	case WM_T_82541_2:
   13115 	case WM_T_82547:
   13116 	case WM_T_82547_2:
   13117 		/* Set dummy value to access EEPROM */
   13118 		sc->sc_nvm_wordsize = 64;
   13119 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13120 			aprint_error_dev(sc->sc_dev,
   13121 			    "%s: failed to read EEPROM size\n", __func__);
			return -1;
   13122 		}
   13123 		reg = data;
   13124 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13125 		if (size == 0)
   13126 			size = 6; /* 1 << 6 == 64 words */
   13127 		else
   13128 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13129 		break;
   13130 	case WM_T_80003:
   13131 	case WM_T_82571:
   13132 	case WM_T_82572:
   13133 	case WM_T_82573: /* SPI case */
   13134 	case WM_T_82574: /* SPI case */
   13135 	case WM_T_82583: /* SPI case */
   13136 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13137 		if (size > 14)
   13138 			size = 14;
   13139 		break;
   13140 	case WM_T_82575:
   13141 	case WM_T_82576:
   13142 	case WM_T_82580:
   13143 	case WM_T_I350:
   13144 	case WM_T_I354:
   13145 	case WM_T_I210:
   13146 	case WM_T_I211:
   13147 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13148 		if (size > 15)
   13149 			size = 15;
   13150 		break;
   13151 	default:
   13152 		aprint_error_dev(sc->sc_dev,
   13153 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13154 		return -1;
   13156 	}
   13157 
   13158 	sc->sc_nvm_wordsize = 1 << size;
   13159 
   13160 	return 0;
   13161 }
   13162 
   13163 /*
   13164  * wm_nvm_ready_spi:
   13165  *
   13166  *	Wait for a SPI EEPROM to be ready for commands.
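 *
 *	This polls the status register (SPI_OPC_RDSR) until the
 *	SPI_SR_RDY (write in progress) bit clears.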
   13167  */
   13168 static int
   13169 wm_nvm_ready_spi(struct wm_softc *sc)
   13170 {
   13171 	uint32_t val;
   13172 	int usec;
   13173 
   13174 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13175 		device_xname(sc->sc_dev), __func__));
   13176 
   13177 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13178 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13179 		wm_eeprom_recvbits(sc, &val, 8);
   13180 		if ((val & SPI_SR_RDY) == 0)
   13181 			break;
   13182 	}
   13183 	if (usec >= SPI_MAX_RETRIES) {
   13184 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   13185 		return -1;
   13186 	}
   13187 	return 0;
   13188 }
   13189 
   13190 /*
   13191  * wm_nvm_read_spi:
   13192  *
   13193  *	Read a word from the EEPROM using the SPI protocol.
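 *
 *	One transaction covers all the words: send SPI_OPC_READ (with
 *	SPI_OPC_A8 carrying address bit 8 on 8 bit address parts) and
 *	the byte address (word << 1), then clock in 16 bits per word,
 *	swapping each word to host byte order.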
   13194  */
   13195 static int
   13196 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13197 {
   13198 	uint32_t reg, val;
   13199 	int i;
   13200 	uint8_t opc;
   13201 	int rv = 0;
   13202 
   13203 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13204 		device_xname(sc->sc_dev), __func__));
   13205 
   13206 	if (sc->nvm.acquire(sc) != 0)
   13207 		return -1;
   13208 
   13209 	/* Clear SK and CS. */
   13210 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13211 	CSR_WRITE(sc, WMREG_EECD, reg);
   13212 	CSR_WRITE_FLUSH(sc);
   13213 	delay(2);
   13214 
   13215 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13216 		goto out;
   13217 
   13218 	/* Toggle CS to flush commands. */
   13219 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13220 	CSR_WRITE_FLUSH(sc);
   13221 	delay(2);
   13222 	CSR_WRITE(sc, WMREG_EECD, reg);
   13223 	CSR_WRITE_FLUSH(sc);
   13224 	delay(2);
   13225 
   13226 	opc = SPI_OPC_READ;
   13227 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13228 		opc |= SPI_OPC_A8;
   13229 
   13230 	wm_eeprom_sendbits(sc, opc, 8);
   13231 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13232 
   13233 	for (i = 0; i < wordcnt; i++) {
   13234 		wm_eeprom_recvbits(sc, &val, 16);
   13235 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13236 	}
   13237 
   13238 	/* Raise CS and clear SK. */
   13239 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13240 	CSR_WRITE(sc, WMREG_EECD, reg);
   13241 	CSR_WRITE_FLUSH(sc);
   13242 	delay(2);
   13243 
   13244 out:
   13245 	sc->nvm.release(sc);
   13246 	return rv;
   13247 }
   13248 
   13249 /* Using with EERD */
   13250 
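/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the DONE bit of the EERD or EEWR register given by "rw".
 */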
   13251 static int
   13252 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13253 {
   13254 	uint32_t attempts = 100000;
   13255 	uint32_t i, reg = 0;
   13256 	int32_t done = -1;
   13257 
   13258 	for (i = 0; i < attempts; i++) {
   13259 		reg = CSR_READ(sc, rw);
   13260 
   13261 		if (reg & EERD_DONE) {
   13262 			done = 0;
   13263 			break;
   13264 		}
   13265 		delay(5);
   13266 	}
   13267 
   13268 	return done;
   13269 }
   13270 
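/*
 * wm_nvm_read_eerd:
 *
 *	Read words through the EERD register: write the word address
 *	together with EERD_START, wait for EERD_DONE and pull the word
 *	out of the data field.
 */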
   13271 static int
   13272 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13273 {
   13274 	int i, eerd = 0;
   13275 	int rv = 0;
   13276 
   13277 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13278 		device_xname(sc->sc_dev), __func__));
   13279 
   13280 	if (sc->nvm.acquire(sc) != 0)
   13281 		return -1;
   13282 
   13283 	for (i = 0; i < wordcnt; i++) {
   13284 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13285 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13286 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13287 		if (rv != 0) {
   13288 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13289 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13290 			break;
   13291 		}
   13292 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13293 	}
   13294 
   13295 	sc->nvm.release(sc);
   13296 	return rv;
   13297 }
   13298 
   13299 /* Flash */
   13300 
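/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	ICH/PCH flash holds two NVM banks; find the valid one by
 *	checking the signature bits in the high byte of the word at
 *	ICH_NVM_SIG_WORD of each bank, falling back to bank 0.
 */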
   13301 static int
   13302 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13303 {
   13304 	uint32_t eecd;
   13305 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13306 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13307 	uint32_t nvm_dword = 0;
   13308 	uint8_t sig_byte = 0;
   13309 	int rv;
   13310 
   13311 	switch (sc->sc_type) {
   13312 	case WM_T_PCH_SPT:
   13313 	case WM_T_PCH_CNP:
   13314 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13315 		act_offset = ICH_NVM_SIG_WORD * 2;
   13316 
   13317 		/* Set bank to 0 in case flash read fails. */
   13318 		*bank = 0;
   13319 
   13320 		/* Check bank 0 */
   13321 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13322 		if (rv != 0)
   13323 			return rv;
   13324 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13325 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13326 			*bank = 0;
   13327 			return 0;
   13328 		}
   13329 
   13330 		/* Check bank 1 */
   13331 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   13332 		    &nvm_dword);
		if (rv != 0)
			return rv;
   13333 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13334 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13335 			*bank = 1;
   13336 			return 0;
   13337 		}
   13338 		aprint_error_dev(sc->sc_dev,
   13339 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13340 		return -1;
   13341 	case WM_T_ICH8:
   13342 	case WM_T_ICH9:
   13343 		eecd = CSR_READ(sc, WMREG_EECD);
   13344 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13345 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13346 			return 0;
   13347 		}
   13348 		/* FALLTHROUGH */
   13349 	default:
   13350 		/* Default to 0 */
   13351 		*bank = 0;
   13352 
   13353 		/* Check bank 0 */
   13354 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13355 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13356 			*bank = 0;
   13357 			return 0;
   13358 		}
   13359 
   13360 		/* Check bank 1 */
   13361 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13362 		    &sig_byte);
   13363 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13364 			*bank = 1;
   13365 			return 0;
   13366 		}
   13367 	}
   13368 
   13369 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13370 		device_xname(sc->sc_dev)));
   13371 	return -1;
   13372 }
   13373 
   13374 /******************************************************************************
   13375  * This function does initial flash setup so that a new read/write/erase cycle
   13376  * can be started.
   13377  *
   13378  * sc - The pointer to the hw structure
   13379  ****************************************************************************/
   13380 static int32_t
   13381 wm_ich8_cycle_init(struct wm_softc *sc)
   13382 {
   13383 	uint16_t hsfsts;
   13384 	int32_t error = 1;
   13385 	int32_t i     = 0;
   13386 
   13387 	if (sc->sc_type >= WM_T_PCH_SPT)
   13388 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13389 	else
   13390 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13391 
   13392 	/* Maybe check the Flash Descriptor Valid bit in Hw status */
   13393 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13394 		return error;
   13395 
   13396 	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   13398 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13399 
   13400 	if (sc->sc_type >= WM_T_PCH_SPT)
   13401 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13402 	else
   13403 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13404 
   13405 	/*
   13406 	 * Either we should have a hardware SPI cycle-in-progress bit to
   13407 	 * check against in order to start a new cycle, or the FDONE bit
   13408 	 * should be changed in the hardware so that it is 1 after reset,
   13409 	 * which can then be used to tell whether a cycle is in progress
   13410 	 * or has been completed.  We should also have a software semaphore
   13411 	 * to guard FDONE or the cycle-in-progress bit so that two threads'
   13412 	 * accesses to those bits are serialized, or a way to keep two
   13413 	 * threads from starting a cycle at the same time.
   13414 	 */
   13415 
   13416 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13417 		/*
   13418 		 * There is no cycle running at present, so we can start a
   13419 		 * cycle
   13420 		 */
   13421 
   13422 		/* Begin by setting Flash Cycle Done. */
   13423 		hsfsts |= HSFSTS_DONE;
   13424 		if (sc->sc_type >= WM_T_PCH_SPT)
   13425 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13426 			    hsfsts & 0xffffUL);
   13427 		else
   13428 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13429 		error = 0;
   13430 	} else {
   13431 		/*
   13432 		 * Otherwise poll for some time so the current cycle has a
   13433 		 * chance to end before giving up.
   13434 		 */
   13435 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13436 			if (sc->sc_type >= WM_T_PCH_SPT)
   13437 				hsfsts = ICH8_FLASH_READ32(sc,
   13438 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13439 			else
   13440 				hsfsts = ICH8_FLASH_READ16(sc,
   13441 				    ICH_FLASH_HSFSTS);
   13442 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13443 				error = 0;
   13444 				break;
   13445 			}
   13446 			delay(1);
   13447 		}
   13448 		if (error == 0) {
   13449 			/*
   13450 			 * The previous cycle ended in time; now set the
   13451 			 * Flash Cycle Done bit.
   13452 			 */
   13453 			hsfsts |= HSFSTS_DONE;
   13454 			if (sc->sc_type >= WM_T_PCH_SPT)
   13455 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13456 				    hsfsts & 0xffffUL);
   13457 			else
   13458 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13459 				    hsfsts);
   13460 		}
   13461 	}
   13462 	return error;
   13463 }
   13464 
   13465 /******************************************************************************
   13466  * This function starts a flash cycle and waits for its completion
   13467  *
   13468  * sc - The pointer to the hw structure
   13469  ****************************************************************************/
   13470 static int32_t
   13471 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13472 {
   13473 	uint16_t hsflctl;
   13474 	uint16_t hsfsts;
   13475 	int32_t error = 1;
   13476 	uint32_t i = 0;
   13477 
   13478 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13479 	if (sc->sc_type >= WM_T_PCH_SPT)
   13480 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13481 	else
   13482 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13483 	hsflctl |= HSFCTL_GO;
   13484 	if (sc->sc_type >= WM_T_PCH_SPT)
   13485 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13486 		    (uint32_t)hsflctl << 16);
   13487 	else
   13488 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13489 
   13490 	/* Wait till FDONE bit is set to 1 */
   13491 	do {
   13492 		if (sc->sc_type >= WM_T_PCH_SPT)
   13493 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13494 			    & 0xffffUL;
   13495 		else
   13496 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13497 		if (hsfsts & HSFSTS_DONE)
   13498 			break;
   13499 		delay(1);
   13500 		i++;
   13501 	} while (i < timeout);
   13502 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   13503 		error = 0;
   13504 
   13505 	return error;
   13506 }
   13507 
   13508 /******************************************************************************
   13509  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13510  *
   13511  * sc - The pointer to the hw structure
   13512  * index - The index of the byte or word to read.
   13513  * size - Size of data to read, 1=byte 2=word, 4=dword
   13514  * data - Pointer to the word to store the value read.
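 *
 * On PCH_SPT and newer, HSFSTS and HSFCTL are accessed as a single
 * 32 bit register (HSFSTS in the low half, HSFCTL in the high half),
 * hence the 16 bit shifts on the >= WM_T_PCH_SPT paths.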
   13515  *****************************************************************************/
   13516 static int32_t
   13517 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13518     uint32_t size, uint32_t *data)
   13519 {
   13520 	uint16_t hsfsts;
   13521 	uint16_t hsflctl;
   13522 	uint32_t flash_linear_address;
   13523 	uint32_t flash_data = 0;
   13524 	int32_t error = 1;
   13525 	int32_t count = 0;
   13526 
   13527 	if (size < 1 || size > 4 || data == NULL ||
   13528 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13529 		return error;
   13530 
   13531 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13532 	    sc->sc_ich8_flash_base;
   13533 
   13534 	do {
   13535 		delay(1);
   13536 		/* Steps */
   13537 		error = wm_ich8_cycle_init(sc);
   13538 		if (error)
   13539 			break;
   13540 
   13541 		if (sc->sc_type >= WM_T_PCH_SPT)
   13542 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13543 			    >> 16;
   13544 		else
   13545 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13546 		/* The byte count field is the transfer size minus one. */
   13547 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13548 		    & HSFCTL_BCOUNT_MASK;
   13549 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13550 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13551 			/*
   13552 			 * In SPT, this register is in the LAN memory space,
   13553 			 * not flash.  Only 32 bit access is supported.
   13554 			 */
   13555 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13556 			    (uint32_t)hsflctl << 16);
   13557 		} else
   13558 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13559 
   13560 		/*
   13561 		 * Write the last 24 bits of index into Flash Linear address
   13562 		 * field in Flash Address
   13563 		 */
   13564 		/* TODO: TBD maybe check the index against the size of flash */
   13565 
   13566 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13567 
   13568 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13569 
   13570 		/*
   13571 		 * If FCERR is set to 1, clear it and retry the whole
   13572 		 * sequence a few more times; otherwise read in (shift
   13573 		 * in) the Flash Data0, which comes back least
   13574 		 * significant byte first.
   13575 		 */
   13576 		if (error == 0) {
   13577 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13578 			if (size == 1)
   13579 				*data = (uint8_t)(flash_data & 0x000000FF);
   13580 			else if (size == 2)
   13581 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13582 			else if (size == 4)
   13583 				*data = (uint32_t)flash_data;
   13584 			break;
   13585 		} else {
   13586 			/*
   13587 			 * If we've gotten here, then things are probably
   13588 			 * completely hosed, but if the error condition is
   13589 			 * detected, it won't hurt to give it another try...
   13590 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13591 			 */
   13592 			if (sc->sc_type >= WM_T_PCH_SPT)
   13593 				hsfsts = ICH8_FLASH_READ32(sc,
   13594 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13595 			else
   13596 				hsfsts = ICH8_FLASH_READ16(sc,
   13597 				    ICH_FLASH_HSFSTS);
   13598 
   13599 			if (hsfsts & HSFSTS_ERR) {
   13600 				/* Repeat for some time before giving up. */
   13601 				continue;
   13602 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13603 				break;
   13604 		}
   13605 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13606 
   13607 	return error;
   13608 }
   13609 
   13610 /******************************************************************************
   13611  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13612  *
   13613  * sc - pointer to wm_hw structure
   13614  * index - The index of the byte to read.
   13615  * data - Pointer to a byte to store the value read.
   13616  *****************************************************************************/
   13617 static int32_t
   13618 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13619 {
   13620 	int32_t status;
   13621 	uint32_t word = 0;
   13622 
   13623 	status = wm_read_ich8_data(sc, index, 1, &word);
   13624 	if (status == 0)
   13625 		*data = (uint8_t)word;
   13626 	else
   13627 		*data = 0;
   13628 
   13629 	return status;
   13630 }
   13631 
   13632 /******************************************************************************
   13633  * Reads a word from the NVM using the ICH8 flash access registers.
   13634  *
   13635  * sc - pointer to wm_hw structure
   13636  * index - The starting byte index of the word to read.
   13637  * data - Pointer to a word to store the value read.
   13638  *****************************************************************************/
   13639 static int32_t
   13640 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13641 {
   13642 	int32_t status;
   13643 	uint32_t word = 0;
   13644 
   13645 	status = wm_read_ich8_data(sc, index, 2, &word);
   13646 	if (status == 0)
   13647 		*data = (uint16_t)word;
   13648 	else
   13649 		*data = 0;
   13650 
   13651 	return status;
   13652 }
   13653 
   13654 /******************************************************************************
   13655  * Reads a dword from the NVM using the ICH8 flash access registers.
   13656  *
   13657  * sc - pointer to wm_hw structure
   13658  * index - The starting byte index of the word to read.
   13659  * data - Pointer to a word to store the value read.
   13660  *****************************************************************************/
   13661 static int32_t
   13662 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13663 {
   13664 	int32_t status;
   13665 
   13666 	status = wm_read_ich8_data(sc, index, 4, data);
   13667 	return status;
   13668 }
   13669 
   13670 /******************************************************************************
   13671  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13672  * register.
   13673  *
   13674  * sc - Struct containing variables accessed by shared code
   13675  * offset - offset of word in the EEPROM to read
   13676  * data - word read from the EEPROM
   13677  * words - number of words to read
   13678  *****************************************************************************/
   13679 static int
   13680 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13681 {
   13682 	int32_t	 rv = 0;
   13683 	uint32_t flash_bank = 0;
   13684 	uint32_t act_offset = 0;
   13685 	uint32_t bank_offset = 0;
   13686 	uint16_t word = 0;
   13687 	uint16_t i = 0;
   13688 
   13689 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13690 		device_xname(sc->sc_dev), __func__));
   13691 
   13692 	if (sc->nvm.acquire(sc) != 0)
   13693 		return -1;
   13694 
   13695 	/*
   13696 	 * We need to know which is the valid flash bank.  In the event
   13697 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13698 	 * managing flash_bank. So it cannot be trusted and needs
   13699 	 * to be updated with each read.
   13700 	 */
   13701 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13702 	if (rv) {
   13703 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13704 			device_xname(sc->sc_dev)));
   13705 		flash_bank = 0;
   13706 	}
   13707 
   13708 	/*
   13709 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13710 	 * size
   13711 	 */
   13712 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13713 
   13714 	for (i = 0; i < words; i++) {
   13715 		/* The NVM part needs a byte offset, hence * 2 */
   13716 		act_offset = bank_offset + ((offset + i) * 2);
   13717 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13718 		if (rv) {
   13719 			aprint_error_dev(sc->sc_dev,
   13720 			    "%s: failed to read NVM\n", __func__);
   13721 			break;
   13722 		}
   13723 		data[i] = word;
   13724 	}
   13725 
   13726 	sc->nvm.release(sc);
   13727 	return rv;
   13728 }
   13729 
   13730 /******************************************************************************
   13731  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13732  * register.
   13733  *
   13734  * sc - Struct containing variables accessed by shared code
   13735  * offset - offset of word in the EEPROM to read
   13736  * data - word read from the EEPROM
   13737  * words - number of words to read
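 *
 * The SPT flash interface is only read by dwords here, so each word
 * is fetched by reading its enclosing dword and picking the low or
 * high half.  Example: word offset 3 in bank 0 is byte offset 6, so
 * read the dword at byte offset 4 and take its high 16 bits.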
   13738  *****************************************************************************/
   13739 static int
   13740 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13741 {
   13742 	int32_t	 rv = 0;
   13743 	uint32_t flash_bank = 0;
   13744 	uint32_t act_offset = 0;
   13745 	uint32_t bank_offset = 0;
   13746 	uint32_t dword = 0;
   13747 	uint16_t i = 0;
   13748 
   13749 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13750 		device_xname(sc->sc_dev), __func__));
   13751 
   13752 	if (sc->nvm.acquire(sc) != 0)
   13753 		return -1;
   13754 
   13755 	/*
   13756 	 * We need to know which is the valid flash bank.  In the event
   13757 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13758 	 * managing flash_bank. So it cannot be trusted and needs
   13759 	 * to be updated with each read.
   13760 	 */
   13761 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13762 	if (rv) {
   13763 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13764 			device_xname(sc->sc_dev)));
   13765 		flash_bank = 0;
   13766 	}
   13767 
   13768 	/*
   13769 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13770 	 * size
   13771 	 */
   13772 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13773 
   13774 	for (i = 0; i < words; i++) {
   13775 		/* The NVM part needs a byte offset, hence * 2 */
   13776 		act_offset = bank_offset + ((offset + i) * 2);
   13777 		/* but we must read dword aligned, so mask ... */
   13778 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13779 		if (rv) {
   13780 			aprint_error_dev(sc->sc_dev,
   13781 			    "%s: failed to read NVM\n", __func__);
   13782 			break;
   13783 		}
   13784 		/* ... and pick out low or high word */
   13785 		if ((act_offset & 0x2) == 0)
   13786 			data[i] = (uint16_t)(dword & 0xFFFF);
   13787 		else
   13788 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13789 	}
   13790 
   13791 	sc->nvm.release(sc);
   13792 	return rv;
   13793 }
   13794 
   13795 /* iNVM */
   13796 
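/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the I210/I211 iNVM (OTP) dword records for a word autoload
 *	record matching the given address; CSR autoload and RSA key
 *	records are skipped by their fixed sizes.
 */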
   13797 static int
   13798 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13799 {
   13800 	int32_t	 rv = -1;	/* Fail unless the address is found */
   13801 	uint32_t invm_dword;
   13802 	uint16_t i;
   13803 	uint8_t record_type, word_address;
   13804 
   13805 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13806 		device_xname(sc->sc_dev), __func__));
   13807 
   13808 	for (i = 0; i < INVM_SIZE; i++) {
   13809 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13810 		/* Get record type */
   13811 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13812 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13813 			break;
   13814 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13815 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13816 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13817 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13818 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13819 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13820 			if (word_address == address) {
   13821 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13822 				rv = 0;
   13823 				break;
   13824 			}
   13825 		}
   13826 	}
   13827 
   13828 	return rv;
   13829 }
   13830 
   13831 static int
   13832 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13833 {
   13834 	int rv = 0;
   13835 	int i;
   13836 
   13837 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13838 		device_xname(sc->sc_dev), __func__));
   13839 
   13840 	if (sc->nvm.acquire(sc) != 0)
   13841 		return -1;
   13842 
   13843 	for (i = 0; i < words; i++) {
   13844 		switch (offset + i) {
   13845 		case NVM_OFF_MACADDR:
   13846 		case NVM_OFF_MACADDR1:
   13847 		case NVM_OFF_MACADDR2:
   13848 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13849 			if (rv != 0) {
   13850 				data[i] = 0xffff;
   13851 				rv = -1;
   13852 			}
   13853 			break;
   13854 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13855 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13856 			if (rv != 0) {
   13857 				*data = INVM_DEFAULT_AL;
   13858 				rv = 0;
   13859 			}
   13860 			break;
   13861 		case NVM_OFF_CFG2:
   13862 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13863 			if (rv != 0) {
   13864 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13865 				rv = 0;
   13866 			}
   13867 			break;
   13868 		case NVM_OFF_CFG4:
   13869 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13870 			if (rv != 0) {
   13871 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13872 				rv = 0;
   13873 			}
   13874 			break;
   13875 		case NVM_OFF_LED_1_CFG:
   13876 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13877 			if (rv != 0) {
   13878 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13879 				rv = 0;
   13880 			}
   13881 			break;
   13882 		case NVM_OFF_LED_0_2_CFG:
   13883 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13884 			if (rv != 0) {
   13885 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13886 				rv = 0;
   13887 			}
   13888 			break;
   13889 		case NVM_OFF_ID_LED_SETTINGS:
   13890 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13891 			if (rv != 0) {
   13892 				*data = ID_LED_RESERVED_FFFF;
   13893 				rv = 0;
   13894 			}
   13895 			break;
   13896 		default:
   13897 			DPRINTF(sc, WM_DEBUG_NVM,
   13898 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
   13899 			data[i] = NVM_RESERVED_WORD;
   13900 			break;
   13901 		}
   13902 	}
   13903 
   13904 	sc->nvm.release(sc);
   13905 	return rv;
   13906 }
   13907 
   13908 /* Locking, NVM type detection, checksum validation, version and read */
   13909 
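/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 0 if the 82573/82574/82583 NVM is external Flash (EECD
 *	bits 15 and 16 both set), otherwise 1 for an on-board EEPROM.
 */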
   13910 static int
   13911 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13912 {
   13913 	uint32_t eecd = 0;
   13914 
   13915 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13916 	    || sc->sc_type == WM_T_82583) {
   13917 		eecd = CSR_READ(sc, WMREG_EECD);
   13918 
   13919 		/* Isolate bits 15 & 16 */
   13920 		eecd = ((eecd >> 15) & 0x03);
   13921 
   13922 		/* If both bits are set, device is Flash type */
   13923 		if (eecd == 0x03)
   13924 			return 0;
   13925 	}
   13926 	return 1;
   13927 }
   13928 
   13929 static int
   13930 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13931 {
   13932 	uint32_t eec;
   13933 
   13934 	eec = CSR_READ(sc, WMREG_EEC);
   13935 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13936 		return 1;
   13937 
   13938 	return 0;
   13939 }
   13940 
   13941 /*
   13942  * wm_nvm_validate_checksum
   13943  *
   13944  * The checksum is defined as the sum of the first 64 (16 bit) words.
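 *
 * The sum must equal NVM_CHECKSUM (0xbaba); the checksum word in the
 * image is chosen so that the sum comes out right.  Note that a
 * mismatch is currently only reported, never treated as fatal.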
   13945  */
   13946 static int
   13947 wm_nvm_validate_checksum(struct wm_softc *sc)
   13948 {
   13949 	uint16_t checksum;
   13950 	uint16_t eeprom_data;
   13951 #ifdef WM_DEBUG
   13952 	uint16_t csum_wordaddr, valid_checksum;
   13953 #endif
   13954 	int i;
   13955 
   13956 	checksum = 0;
   13957 
   13958 	/* Don't check for I211 */
   13959 	if (sc->sc_type == WM_T_I211)
   13960 		return 0;
   13961 
   13962 #ifdef WM_DEBUG
   13963 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13964 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13965 		csum_wordaddr = NVM_OFF_COMPAT;
   13966 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13967 	} else {
   13968 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13969 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13970 	}
   13971 
   13972 	/* Dump EEPROM image for debug */
   13973 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13974 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13975 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13976 		/* XXX PCH_SPT? */
   13977 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13978 		if ((eeprom_data & valid_checksum) == 0)
   13979 			DPRINTF(sc, WM_DEBUG_NVM,
   13980 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13981 				device_xname(sc->sc_dev), eeprom_data,
   13982 				    valid_checksum));
   13983 	}
   13984 
   13985 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   13986 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13987 		for (i = 0; i < NVM_SIZE; i++) {
   13988 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13989 				printf("XXXX ");
   13990 			else
   13991 				printf("%04hx ", eeprom_data);
   13992 			if (i % 8 == 7)
   13993 				printf("\n");
   13994 		}
   13995 	}
   13996 
   13997 #endif /* WM_DEBUG */
   13998 
   13999 	for (i = 0; i < NVM_SIZE; i++) {
   14000 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14001 			return 1;
   14002 		checksum += eeprom_data;
   14003 	}
   14004 
   14005 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14006 #ifdef WM_DEBUG
   14007 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14008 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14009 #endif
   14010 	}
   14011 
   14012 	return 0;
   14013 }
   14014 
   14015 static void
   14016 wm_nvm_version_invm(struct wm_softc *sc)
   14017 {
   14018 	uint32_t dword;
   14019 
   14020 	/*
   14021 	 * Linux's code to decode the version is very strange, so we
   14022 	 * don't follow that algorithm and just use word 61 as the
   14023 	 * document describes.  Perhaps it's not perfect, though...
   14024 	 *
   14025 	 * Example:
   14026 	 *
   14027 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14028 	 */
   14029 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14030 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14031 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14032 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14033 }
   14034 
   14035 static void
   14036 wm_nvm_version(struct wm_softc *sc)
   14037 {
   14038 	uint16_t major, minor, build, patch;
   14039 	uint16_t uid0, uid1;
   14040 	uint16_t nvm_data;
   14041 	uint16_t off;
   14042 	bool check_version = false;
   14043 	bool check_optionrom = false;
   14044 	bool have_build = false;
   14045 	bool have_uid = true;
   14046 
   14047 	/*
   14048 	 * Version format:
   14049 	 *
   14050 	 * XYYZ
   14051 	 * X0YZ
   14052 	 * X0YY
   14053 	 *
   14054 	 * Example:
   14055 	 *
   14056 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14057 	 *	82571	0x50a6	5.10.6?
   14058 	 *	82572	0x506a	5.6.10?
   14059 	 *	82572EI	0x5069	5.6.9?
   14060 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14061 	 *		0x2013	2.1.3?
   14062 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14063 	 * ICH8+82567	0x0040	0.4.0?
   14064 	 * ICH9+82566	0x1040	1.4.0?
   14065 	 *ICH10+82567	0x0043	0.4.3?
   14066 	 *  PCH+82577	0x00c1	0.12.1?
   14067 	 * PCH2+82579	0x00d3	0.13.3?
   14068 	 *		0x00d4	0.13.4?
   14069 	 *  LPT+I218	0x0023	0.2.3?
   14070 	 *  SPT+I219	0x0084	0.8.4?
   14071 	 *  CNP+I219	0x0054	0.5.4?
   14072 	 */
   14073 
   14074 	/*
   14075 	 * XXX
   14076 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   14077 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14078 	 */
   14079 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14080 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14081 		have_uid = false;
   14082 
   14083 	switch (sc->sc_type) {
   14084 	case WM_T_82571:
   14085 	case WM_T_82572:
   14086 	case WM_T_82574:
   14087 	case WM_T_82583:
   14088 		check_version = true;
   14089 		check_optionrom = true;
   14090 		have_build = true;
   14091 		break;
   14092 	case WM_T_ICH8:
   14093 	case WM_T_ICH9:
   14094 	case WM_T_ICH10:
   14095 	case WM_T_PCH:
   14096 	case WM_T_PCH2:
   14097 	case WM_T_PCH_LPT:
   14098 	case WM_T_PCH_SPT:
   14099 	case WM_T_PCH_CNP:
   14100 		check_version = true;
   14101 		have_build = true;
   14102 		have_uid = false;
   14103 		break;
   14104 	case WM_T_82575:
   14105 	case WM_T_82576:
   14106 	case WM_T_82580:
   14107 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14108 			check_version = true;
   14109 		break;
   14110 	case WM_T_I211:
   14111 		wm_nvm_version_invm(sc);
   14112 		have_uid = false;
   14113 		goto printver;
   14114 	case WM_T_I210:
   14115 		if (!wm_nvm_flash_presence_i210(sc)) {
   14116 			wm_nvm_version_invm(sc);
   14117 			have_uid = false;
   14118 			goto printver;
   14119 		}
   14120 		/* FALLTHROUGH */
   14121 	case WM_T_I350:
   14122 	case WM_T_I354:
   14123 		check_version = true;
   14124 		check_optionrom = true;
   14125 		break;
   14126 	default:
   14127 		return;
   14128 	}
   14129 	if (check_version
   14130 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14131 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14132 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14133 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14134 			build = nvm_data & NVM_BUILD_MASK;
   14135 			have_build = true;
   14136 		} else
   14137 			minor = nvm_data & 0x00ff;
   14138 
   14139 		/* The minor is hex-coded decimal; convert (e.g. 0x16 -> 16) */
   14140 		minor = (minor / 16) * 10 + (minor % 16);
   14141 		sc->sc_nvm_ver_major = major;
   14142 		sc->sc_nvm_ver_minor = minor;
   14143 
   14144 printver:
   14145 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14146 		    sc->sc_nvm_ver_minor);
   14147 		if (have_build) {
   14148 			sc->sc_nvm_ver_build = build;
   14149 			aprint_verbose(".%d", build);
   14150 		}
   14151 	}
   14152 
   14153 	/* Assume the Option ROM area is above NVM_SIZE */
   14154 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14155 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14156 		/* Option ROM Version */
   14157 		if ((off != 0x0000) && (off != 0xffff)) {
   14158 			int rv;
   14159 
   14160 			off += NVM_COMBO_VER_OFF;
   14161 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14162 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14163 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14164 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14165 				/* 16bits */
   14166 				major = uid0 >> 8;
   14167 				build = (uid0 << 8) | (uid1 >> 8);
   14168 				patch = uid1 & 0x00ff;
   14169 				aprint_verbose(", option ROM Version %d.%d.%d",
   14170 				    major, build, patch);
   14171 			}
   14172 		}
   14173 	}
   14174 
   14175 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14176 		aprint_verbose(", Image Unique ID %08x",
   14177 		    ((uint32_t)uid1 << 16) | uid0);
   14178 }
   14179 
   14180 /*
   14181  * wm_nvm_read:
   14182  *
   14183  *	Read data from the serial EEPROM.
   14184  */
   14185 static int
   14186 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14187 {
   14188 	int rv;
   14189 
   14190 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14191 		device_xname(sc->sc_dev), __func__));
   14192 
   14193 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14194 		return -1;
   14195 
   14196 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14197 
   14198 	return rv;
   14199 }
   14200 
   14201 /*
   14202  * Hardware semaphores.
   14203  * Very complex...
   14204  */
   14205 
   14206 static int
   14207 wm_get_null(struct wm_softc *sc)
   14208 {
   14209 
   14210 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14211 		device_xname(sc->sc_dev), __func__));
   14212 	return 0;
   14213 }
   14214 
   14215 static void
   14216 wm_put_null(struct wm_softc *sc)
   14217 {
   14218 
   14219 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14220 		device_xname(sc->sc_dev), __func__));
   14221 	return;
   14222 }
   14223 
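/*
 * wm_get_eecd:
 *
 *	Request direct software access to the EEPROM interface by
 *	setting EECD_EE_REQ and waiting for the EECD_EE_GNT grant bit.
 */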
   14224 static int
   14225 wm_get_eecd(struct wm_softc *sc)
   14226 {
   14227 	uint32_t reg;
   14228 	int x;
   14229 
   14230 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14231 		device_xname(sc->sc_dev), __func__));
   14232 
   14233 	reg = CSR_READ(sc, WMREG_EECD);
   14234 
   14235 	/* Request EEPROM access. */
   14236 	reg |= EECD_EE_REQ;
   14237 	CSR_WRITE(sc, WMREG_EECD, reg);
   14238 
   14239 	/* ..and wait for it to be granted. */
   14240 	for (x = 0; x < 1000; x++) {
   14241 		reg = CSR_READ(sc, WMREG_EECD);
   14242 		if (reg & EECD_EE_GNT)
   14243 			break;
   14244 		delay(5);
   14245 	}
   14246 	if ((reg & EECD_EE_GNT) == 0) {
   14247 		aprint_error_dev(sc->sc_dev,
   14248 		    "could not acquire EEPROM GNT\n");
   14249 		reg &= ~EECD_EE_REQ;
   14250 		CSR_WRITE(sc, WMREG_EECD, reg);
   14251 		return -1;
   14252 	}
   14253 
   14254 	return 0;
   14255 }
   14256 
   14257 static void
   14258 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14259 {
   14260 
   14261 	*eecd |= EECD_SK;
   14262 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14263 	CSR_WRITE_FLUSH(sc);
   14264 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14265 		delay(1);
   14266 	else
   14267 		delay(50);
   14268 }
   14269 
   14270 static void
   14271 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14272 {
   14273 
   14274 	*eecd &= ~EECD_SK;
   14275 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14276 	CSR_WRITE_FLUSH(sc);
   14277 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14278 		delay(1);
   14279 	else
   14280 		delay(50);
   14281 }
   14282 
   14283 static void
   14284 wm_put_eecd(struct wm_softc *sc)
   14285 {
   14286 	uint32_t reg;
   14287 
   14288 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14289 		device_xname(sc->sc_dev), __func__));
   14290 
   14291 	/* Stop nvm */
   14292 	reg = CSR_READ(sc, WMREG_EECD);
   14293 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14294 		/* Pull CS high */
   14295 		reg |= EECD_CS;
   14296 		wm_nvm_eec_clock_lower(sc, &reg);
   14297 	} else {
   14298 		/* CS on Microwire is active-high */
   14299 		reg &= ~(EECD_CS | EECD_DI);
   14300 		CSR_WRITE(sc, WMREG_EECD, reg);
   14301 		wm_nvm_eec_clock_raise(sc, &reg);
   14302 		wm_nvm_eec_clock_lower(sc, &reg);
   14303 	}
   14304 
   14305 	reg = CSR_READ(sc, WMREG_EECD);
   14306 	reg &= ~EECD_EE_REQ;
   14307 	CSR_WRITE(sc, WMREG_EECD, reg);
   14308 
   14309 	return;
   14310 }
   14311 
   14312 /*
   14313  * Get hardware semaphore.
   14314  * Same as e1000_get_hw_semaphore_generic()
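 *
 * This is a two stage acquire: wait for SWSM_SMBI to read back clear
 * (the read itself sets the bit), then set SWSM_SWESMBI and read it
 * back to confirm that software, not firmware, owns the semaphore.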
   14315  */
   14316 static int
   14317 wm_get_swsm_semaphore(struct wm_softc *sc)
   14318 {
   14319 	int32_t timeout;
   14320 	uint32_t swsm;
   14321 
   14322 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14323 		device_xname(sc->sc_dev), __func__));
   14324 	KASSERT(sc->sc_nvm_wordsize > 0);
   14325 
   14326 retry:
   14327 	/* Get the SW semaphore. */
   14328 	timeout = sc->sc_nvm_wordsize + 1;
   14329 	while (timeout) {
   14330 		swsm = CSR_READ(sc, WMREG_SWSM);
   14331 
   14332 		if ((swsm & SWSM_SMBI) == 0)
   14333 			break;
   14334 
   14335 		delay(50);
   14336 		timeout--;
   14337 	}
   14338 
   14339 	if (timeout == 0) {
   14340 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14341 			/*
   14342 			 * In rare circumstances, the SW semaphore may already
   14343 			 * be held unintentionally. Clear the semaphore once
   14344 			 * before giving up.
   14345 			 */
   14346 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14347 			wm_put_swsm_semaphore(sc);
   14348 			goto retry;
   14349 		}
   14350 		aprint_error_dev(sc->sc_dev,
   14351 		    "could not acquire SWSM SMBI\n");
   14352 		return 1;
   14353 	}
   14354 
   14355 	/* Get the FW semaphore. */
   14356 	timeout = sc->sc_nvm_wordsize + 1;
   14357 	while (timeout) {
   14358 		swsm = CSR_READ(sc, WMREG_SWSM);
   14359 		swsm |= SWSM_SWESMBI;
   14360 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14361 		/* If we managed to set the bit we got the semaphore. */
   14362 		swsm = CSR_READ(sc, WMREG_SWSM);
   14363 		if (swsm & SWSM_SWESMBI)
   14364 			break;
   14365 
   14366 		delay(50);
   14367 		timeout--;
   14368 	}
   14369 
   14370 	if (timeout == 0) {
   14371 		aprint_error_dev(sc->sc_dev,
   14372 		    "could not acquire SWSM SWESMBI\n");
   14373 		/* Release semaphores */
   14374 		wm_put_swsm_semaphore(sc);
   14375 		return 1;
   14376 	}
   14377 	return 0;
   14378 }
   14379 
   14380 /*
   14381  * Put hardware semaphore.
   14382  * Same as e1000_put_hw_semaphore_generic()
   14383  */
   14384 static void
   14385 wm_put_swsm_semaphore(struct wm_softc *sc)
   14386 {
   14387 	uint32_t swsm;
   14388 
   14389 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14390 		device_xname(sc->sc_dev), __func__));
   14391 
   14392 	swsm = CSR_READ(sc, WMREG_SWSM);
   14393 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14394 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14395 }
   14396 
   14397 /*
   14398  * Get SW/FW semaphore.
   14399  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
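 *
 * The resource mask appears twice in SW_FW_SYNC: shifted by
 * SWFW_SOFT_SHIFT for the driver's claim and by SWFW_FIRM_SHIFT for
 * the firmware's; a resource is free only when both copies are clear.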
   14400  */
   14401 static int
   14402 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14403 {
   14404 	uint32_t swfw_sync;
   14405 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14406 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14407 	int timeout;
   14408 
   14409 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14410 		device_xname(sc->sc_dev), __func__));
   14411 
   14412 	if (sc->sc_type == WM_T_80003)
   14413 		timeout = 50;
   14414 	else
   14415 		timeout = 200;
   14416 
   14417 	while (timeout) {
   14418 		if (wm_get_swsm_semaphore(sc)) {
   14419 			aprint_error_dev(sc->sc_dev,
   14420 			    "%s: failed to get semaphore\n",
   14421 			    __func__);
   14422 			return 1;
   14423 		}
   14424 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14425 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14426 			swfw_sync |= swmask;
   14427 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14428 			wm_put_swsm_semaphore(sc);
   14429 			return 0;
   14430 		}
   14431 		wm_put_swsm_semaphore(sc);
   14432 		delay(5000);
   14433 		timeout--;
   14434 	}
   14435 	device_printf(sc->sc_dev,
   14436 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14437 	    mask, swfw_sync);
   14438 	return 1;
   14439 }
   14440 
   14441 static void
   14442 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14443 {
   14444 	uint32_t swfw_sync;
   14445 
   14446 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14447 		device_xname(sc->sc_dev), __func__));
   14448 
   14449 	while (wm_get_swsm_semaphore(sc) != 0)
   14450 		continue;
   14451 
   14452 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14453 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14454 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14455 
   14456 	wm_put_swsm_semaphore(sc);
   14457 }
   14458 
   14459 static int
   14460 wm_get_nvm_80003(struct wm_softc *sc)
   14461 {
   14462 	int rv;
   14463 
   14464 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14465 		device_xname(sc->sc_dev), __func__));
   14466 
   14467 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14468 		aprint_error_dev(sc->sc_dev,
   14469 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14470 		return rv;
   14471 	}
   14472 
   14473 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14474 	    && (rv = wm_get_eecd(sc)) != 0) {
   14475 		aprint_error_dev(sc->sc_dev,
   14476 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14477 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14478 		return rv;
   14479 	}
   14480 
   14481 	return 0;
   14482 }
   14483 
   14484 static void
   14485 wm_put_nvm_80003(struct wm_softc *sc)
   14486 {
   14487 
   14488 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14489 		device_xname(sc->sc_dev), __func__));
   14490 
   14491 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14492 		wm_put_eecd(sc);
   14493 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14494 }
   14495 
   14496 static int
   14497 wm_get_nvm_82571(struct wm_softc *sc)
   14498 {
   14499 	int rv;
   14500 
   14501 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14502 		device_xname(sc->sc_dev), __func__));
   14503 
   14504 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14505 		return rv;
   14506 
   14507 	switch (sc->sc_type) {
   14508 	case WM_T_82573:
   14509 		break;
   14510 	default:
   14511 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14512 			rv = wm_get_eecd(sc);
   14513 		break;
   14514 	}
   14515 
   14516 	if (rv != 0) {
   14517 		aprint_error_dev(sc->sc_dev,
   14518 		    "%s: failed to get semaphore\n",
   14519 		    __func__);
   14520 		wm_put_swsm_semaphore(sc);
   14521 	}
   14522 
   14523 	return rv;
   14524 }
   14525 
   14526 static void
   14527 wm_put_nvm_82571(struct wm_softc *sc)
   14528 {
   14529 
   14530 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14531 		device_xname(sc->sc_dev), __func__));
   14532 
   14533 	switch (sc->sc_type) {
   14534 	case WM_T_82573:
   14535 		break;
   14536 	default:
   14537 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14538 			wm_put_eecd(sc);
   14539 		break;
   14540 	}
   14541 
   14542 	wm_put_swsm_semaphore(sc);
   14543 }
   14544 
   14545 static int
   14546 wm_get_phy_82575(struct wm_softc *sc)
   14547 {
   14548 
   14549 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14550 		device_xname(sc->sc_dev), __func__));
   14551 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14552 }
   14553 
   14554 static void
   14555 wm_put_phy_82575(struct wm_softc *sc)
   14556 {
   14557 
   14558 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14559 		device_xname(sc->sc_dev), __func__));
   14560 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14561 }
   14562 
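          /*
           * ICH/PCH-style ownership flag: software claims the MDIO/NVM
           * resources by setting EXTCNFCTR_MDIO_SW_OWNERSHIP and reading it
           * back; while firmware owns them the bit reads back clear.  The
           * sc_ich_phymtx mutex serializes kernel threads, and the EXTCNFCTR
           * bit arbitrates between the driver and the firmware.
           */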
   14563 static int
   14564 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14565 {
   14566 	uint32_t ext_ctrl;
    14567 	int timeout;
   14568 
   14569 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14570 		device_xname(sc->sc_dev), __func__));
   14571 
   14572 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14573 	for (timeout = 0; timeout < 200; timeout++) {
   14574 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14575 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14576 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14577 
   14578 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14579 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14580 			return 0;
   14581 		delay(5000);
   14582 	}
   14583 	device_printf(sc->sc_dev,
   14584 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14585 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14586 	return 1;
   14587 }
   14588 
   14589 static void
   14590 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14591 {
   14592 	uint32_t ext_ctrl;
   14593 
   14594 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14595 		device_xname(sc->sc_dev), __func__));
   14596 
   14597 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14598 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14599 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14600 
   14601 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14602 }
   14603 
   14604 static int
   14605 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14606 {
   14607 	uint32_t ext_ctrl;
   14608 	int timeout;
   14609 
   14610 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14611 		device_xname(sc->sc_dev), __func__));
   14612 	mutex_enter(sc->sc_ich_phymtx);
   14613 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14614 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14615 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14616 			break;
   14617 		delay(1000);
   14618 	}
   14619 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14620 		device_printf(sc->sc_dev,
   14621 		    "SW has already locked the resource\n");
   14622 		goto out;
   14623 	}
   14624 
   14625 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14626 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14627 	for (timeout = 0; timeout < 1000; timeout++) {
   14628 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14629 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14630 			break;
   14631 		delay(1000);
   14632 	}
   14633 	if (timeout >= 1000) {
   14634 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14635 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14636 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14637 		goto out;
   14638 	}
   14639 	return 0;
   14640 
   14641 out:
   14642 	mutex_exit(sc->sc_ich_phymtx);
   14643 	return 1;
   14644 }
   14645 
   14646 static void
   14647 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14648 {
   14649 	uint32_t ext_ctrl;
   14650 
   14651 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14652 		device_xname(sc->sc_dev), __func__));
   14653 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14654 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14655 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14656 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14657 	} else {
   14658 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14659 	}
   14660 
   14661 	mutex_exit(sc->sc_ich_phymtx);
   14662 }
   14663 
   14664 static int
   14665 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14666 {
   14667 
   14668 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14669 		device_xname(sc->sc_dev), __func__));
   14670 	mutex_enter(sc->sc_ich_nvmmtx);
   14671 
   14672 	return 0;
   14673 }
   14674 
   14675 static void
   14676 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14677 {
   14678 
   14679 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14680 		device_xname(sc->sc_dev), __func__));
   14681 	mutex_exit(sc->sc_ich_nvmmtx);
   14682 }
   14683 
   14684 static int
   14685 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14686 {
   14687 	int i = 0;
   14688 	uint32_t reg;
   14689 
   14690 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14691 		device_xname(sc->sc_dev), __func__));
   14692 
   14693 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14694 	do {
   14695 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14696 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14697 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14698 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14699 			break;
   14700 		delay(2*1000);
   14701 		i++;
   14702 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14703 
   14704 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14705 		wm_put_hw_semaphore_82573(sc);
   14706 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14707 		    device_xname(sc->sc_dev));
   14708 		return -1;
   14709 	}
   14710 
   14711 	return 0;
   14712 }
   14713 
   14714 static void
   14715 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14716 {
   14717 	uint32_t reg;
   14718 
   14719 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14720 		device_xname(sc->sc_dev), __func__));
   14721 
   14722 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14723 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14724 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14725 }
   14726 
   14727 /*
   14728  * Management mode and power management related subroutines.
   14729  * BMC, AMT, suspend/resume and EEE.
   14730  */
   14731 
   14732 #ifdef WM_WOL
   14733 static int
   14734 wm_check_mng_mode(struct wm_softc *sc)
   14735 {
   14736 	int rv;
   14737 
   14738 	switch (sc->sc_type) {
   14739 	case WM_T_ICH8:
   14740 	case WM_T_ICH9:
   14741 	case WM_T_ICH10:
   14742 	case WM_T_PCH:
   14743 	case WM_T_PCH2:
   14744 	case WM_T_PCH_LPT:
   14745 	case WM_T_PCH_SPT:
   14746 	case WM_T_PCH_CNP:
   14747 		rv = wm_check_mng_mode_ich8lan(sc);
   14748 		break;
   14749 	case WM_T_82574:
   14750 	case WM_T_82583:
   14751 		rv = wm_check_mng_mode_82574(sc);
   14752 		break;
   14753 	case WM_T_82571:
   14754 	case WM_T_82572:
   14755 	case WM_T_82573:
   14756 	case WM_T_80003:
   14757 		rv = wm_check_mng_mode_generic(sc);
   14758 		break;
   14759 	default:
    14760 		/* Nothing to do */
   14761 		rv = 0;
   14762 		break;
   14763 	}
   14764 
   14765 	return rv;
   14766 }
   14767 
   14768 static int
   14769 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14770 {
   14771 	uint32_t fwsm;
   14772 
   14773 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14774 
   14775 	if (((fwsm & FWSM_FW_VALID) != 0)
   14776 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14777 		return 1;
   14778 
   14779 	return 0;
   14780 }
   14781 
   14782 static int
   14783 wm_check_mng_mode_82574(struct wm_softc *sc)
   14784 {
   14785 	uint16_t data;
   14786 
   14787 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14788 
   14789 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14790 		return 1;
   14791 
   14792 	return 0;
   14793 }
   14794 
   14795 static int
   14796 wm_check_mng_mode_generic(struct wm_softc *sc)
   14797 {
   14798 	uint32_t fwsm;
   14799 
   14800 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14801 
   14802 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14803 		return 1;
   14804 
   14805 	return 0;
   14806 }
   14807 #endif /* WM_WOL */
   14808 
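          /*
           * Decide whether management packets must be passed through to the
           * host.  Nonzero is returned only when manageability firmware is
           * present and TCO packet reception is enabled, and additionally:
           * the ARC subsystem is valid, management clock gating is off and
           * the firmware is in IAMT mode; or, on 82574/82583, the NVM CFG2
           * manageability field selects pass-through; or, on other parts,
           * SMBus is enabled while ASF is not.
           */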
   14809 static int
   14810 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14811 {
   14812 	uint32_t manc, fwsm, factps;
   14813 
   14814 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14815 		return 0;
   14816 
   14817 	manc = CSR_READ(sc, WMREG_MANC);
   14818 
   14819 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14820 		device_xname(sc->sc_dev), manc));
   14821 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14822 		return 0;
   14823 
   14824 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14825 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14826 		factps = CSR_READ(sc, WMREG_FACTPS);
   14827 		if (((factps & FACTPS_MNGCG) == 0)
   14828 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14829 			return 1;
   14830 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14831 		uint16_t data;
   14832 
   14833 		factps = CSR_READ(sc, WMREG_FACTPS);
   14834 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14835 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14836 			device_xname(sc->sc_dev), factps, data));
   14837 		if (((factps & FACTPS_MNGCG) == 0)
   14838 		    && ((data & NVM_CFG2_MNGM_MASK)
   14839 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14840 			return 1;
   14841 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14842 	    && ((manc & MANC_ASF_EN) == 0))
   14843 		return 1;
   14844 
   14845 	return 0;
   14846 }
   14847 
   14848 static bool
   14849 wm_phy_resetisblocked(struct wm_softc *sc)
   14850 {
   14851 	bool blocked = false;
   14852 	uint32_t reg;
   14853 	int i = 0;
   14854 
   14855 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14856 		device_xname(sc->sc_dev), __func__));
   14857 
   14858 	switch (sc->sc_type) {
   14859 	case WM_T_ICH8:
   14860 	case WM_T_ICH9:
   14861 	case WM_T_ICH10:
   14862 	case WM_T_PCH:
   14863 	case WM_T_PCH2:
   14864 	case WM_T_PCH_LPT:
   14865 	case WM_T_PCH_SPT:
   14866 	case WM_T_PCH_CNP:
   14867 		do {
   14868 			reg = CSR_READ(sc, WMREG_FWSM);
   14869 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14870 				blocked = true;
   14871 				delay(10*1000);
   14872 				continue;
   14873 			}
   14874 			blocked = false;
   14875 		} while (blocked && (i++ < 30));
   14876 		return blocked;
   14878 	case WM_T_82571:
   14879 	case WM_T_82572:
   14880 	case WM_T_82573:
   14881 	case WM_T_82574:
   14882 	case WM_T_82583:
   14883 	case WM_T_80003:
   14884 		reg = CSR_READ(sc, WMREG_MANC);
   14885 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14886 			return true;
   14887 		else
   14888 			return false;
   14890 	default:
   14891 		/* No problem */
   14892 		break;
   14893 	}
   14894 
   14895 	return false;
   14896 }
   14897 
   14898 static void
   14899 wm_get_hw_control(struct wm_softc *sc)
   14900 {
   14901 	uint32_t reg;
   14902 
   14903 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14904 		device_xname(sc->sc_dev), __func__));
   14905 
   14906 	if (sc->sc_type == WM_T_82573) {
   14907 		reg = CSR_READ(sc, WMREG_SWSM);
   14908 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14909 	} else if (sc->sc_type >= WM_T_82571) {
   14910 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14911 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14912 	}
   14913 }
   14914 
   14915 static void
   14916 wm_release_hw_control(struct wm_softc *sc)
   14917 {
   14918 	uint32_t reg;
   14919 
   14920 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14921 		device_xname(sc->sc_dev), __func__));
   14922 
   14923 	if (sc->sc_type == WM_T_82573) {
   14924 		reg = CSR_READ(sc, WMREG_SWSM);
   14925 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14926 	} else if (sc->sc_type >= WM_T_82571) {
   14927 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14928 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14929 	}
   14930 }
   14931 
   14932 static void
   14933 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14934 {
   14935 	uint32_t reg;
   14936 
   14937 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14938 		device_xname(sc->sc_dev), __func__));
   14939 
   14940 	if (sc->sc_type < WM_T_PCH2)
   14941 		return;
   14942 
   14943 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14944 
   14945 	if (gate)
   14946 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14947 	else
   14948 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14949 
   14950 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14951 }
   14952 
   14953 static int
   14954 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14955 {
   14956 	uint32_t fwsm, reg;
   14957 	int rv = 0;
   14958 
   14959 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14960 		device_xname(sc->sc_dev), __func__));
   14961 
   14962 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14963 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14964 
   14965 	/* Disable ULP */
   14966 	wm_ulp_disable(sc);
   14967 
   14968 	/* Acquire PHY semaphore */
   14969 	rv = sc->phy.acquire(sc);
   14970 	if (rv != 0) {
   14971 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
    14972 		    device_xname(sc->sc_dev), __func__));
   14973 		return -1;
   14974 	}
   14975 
   14976 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14977 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14978 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14979 	 */
   14980 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14981 	switch (sc->sc_type) {
   14982 	case WM_T_PCH_LPT:
   14983 	case WM_T_PCH_SPT:
   14984 	case WM_T_PCH_CNP:
   14985 		if (wm_phy_is_accessible_pchlan(sc))
   14986 			break;
   14987 
   14988 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14989 		 * forcing MAC to SMBus mode first.
   14990 		 */
   14991 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14992 		reg |= CTRL_EXT_FORCE_SMBUS;
   14993 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14994 #if 0
   14995 		/* XXX Isn't this required??? */
   14996 		CSR_WRITE_FLUSH(sc);
   14997 #endif
   14998 		/* Wait 50 milliseconds for MAC to finish any retries
   14999 		 * that it might be trying to perform from previous
   15000 		 * attempts to acknowledge any phy read requests.
   15001 		 */
   15002 		delay(50 * 1000);
   15003 		/* FALLTHROUGH */
   15004 	case WM_T_PCH2:
   15005 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15006 			break;
   15007 		/* FALLTHROUGH */
   15008 	case WM_T_PCH:
   15009 		if (sc->sc_type == WM_T_PCH)
   15010 			if ((fwsm & FWSM_FW_VALID) != 0)
   15011 				break;
   15012 
   15013 		if (wm_phy_resetisblocked(sc) == true) {
   15014 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15015 			break;
   15016 		}
   15017 
   15018 		/* Toggle LANPHYPC Value bit */
   15019 		wm_toggle_lanphypc_pch_lpt(sc);
   15020 
   15021 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15022 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15023 				break;
   15024 
   15025 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15026 			 * so ensure that the MAC is also out of SMBus mode
   15027 			 */
   15028 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15029 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15030 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15031 
   15032 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15033 				break;
   15034 			rv = -1;
   15035 		}
   15036 		break;
   15037 	default:
   15038 		break;
   15039 	}
   15040 
   15041 	/* Release semaphore */
   15042 	sc->phy.release(sc);
   15043 
   15044 	if (rv == 0) {
   15045 		/* Check to see if able to reset PHY.  Print error if not */
   15046 		if (wm_phy_resetisblocked(sc)) {
   15047 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15048 			goto out;
   15049 		}
   15050 
   15051 		/* Reset the PHY before any access to it.  Doing so, ensures
   15052 		 * that the PHY is in a known good state before we read/write
   15053 		 * PHY registers.  The generic reset is sufficient here,
   15054 		 * because we haven't determined the PHY type yet.
   15055 		 */
   15056 		if (wm_reset_phy(sc) != 0)
   15057 			goto out;
   15058 
    15059 		/* On a successful reset, possibly need to wait for the PHY
    15060 		 * to quiesce to an accessible state before returning control
    15061 		 * to the calling function.  If the PHY does not quiesce,
    15062 		 * report that the reset is blocked, as this is the
    15063 		 * condition the PHY is in.
    15064 		 */
   15065 		if (wm_phy_resetisblocked(sc))
   15066 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15067 	}
   15068 
   15069 out:
   15070 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15071 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15072 		delay(10*1000);
   15073 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15074 	}
   15075 
    15076 	return rv;
   15077 }
   15078 
   15079 static void
   15080 wm_init_manageability(struct wm_softc *sc)
   15081 {
   15082 
   15083 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15084 		device_xname(sc->sc_dev), __func__));
   15085 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15086 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15087 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15088 
   15089 		/* Disable hardware interception of ARP */
   15090 		manc &= ~MANC_ARP_EN;
   15091 
   15092 		/* Enable receiving management packets to the host */
   15093 		if (sc->sc_type >= WM_T_82571) {
   15094 			manc |= MANC_EN_MNG2HOST;
   15095 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15096 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15097 		}
   15098 
   15099 		CSR_WRITE(sc, WMREG_MANC, manc);
   15100 	}
   15101 }
   15102 
   15103 static void
   15104 wm_release_manageability(struct wm_softc *sc)
   15105 {
   15106 
   15107 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15108 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15109 
   15110 		manc |= MANC_ARP_EN;
   15111 		if (sc->sc_type >= WM_T_82571)
   15112 			manc &= ~MANC_EN_MNG2HOST;
   15113 
   15114 		CSR_WRITE(sc, WMREG_MANC, manc);
   15115 	}
   15116 }
   15117 
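          /*
           * Collect the wakeup-related capability flags for this part.  The
           * switch below relies on fallthrough: each group of cases adds its
           * own flags and then accumulates those of the groups that follow
           * (e.g. parts flagged with WM_F_HAS_AMT also end up with
           * WM_F_ASF_FIRMWARE_PRES).
           */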
   15118 static void
   15119 wm_get_wakeup(struct wm_softc *sc)
   15120 {
   15121 
   15122 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15123 	switch (sc->sc_type) {
   15124 	case WM_T_82573:
   15125 	case WM_T_82583:
   15126 		sc->sc_flags |= WM_F_HAS_AMT;
   15127 		/* FALLTHROUGH */
   15128 	case WM_T_80003:
   15129 	case WM_T_82575:
   15130 	case WM_T_82576:
   15131 	case WM_T_82580:
   15132 	case WM_T_I350:
   15133 	case WM_T_I354:
   15134 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15135 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15136 		/* FALLTHROUGH */
   15137 	case WM_T_82541:
   15138 	case WM_T_82541_2:
   15139 	case WM_T_82547:
   15140 	case WM_T_82547_2:
   15141 	case WM_T_82571:
   15142 	case WM_T_82572:
   15143 	case WM_T_82574:
   15144 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15145 		break;
   15146 	case WM_T_ICH8:
   15147 	case WM_T_ICH9:
   15148 	case WM_T_ICH10:
   15149 	case WM_T_PCH:
   15150 	case WM_T_PCH2:
   15151 	case WM_T_PCH_LPT:
   15152 	case WM_T_PCH_SPT:
   15153 	case WM_T_PCH_CNP:
   15154 		sc->sc_flags |= WM_F_HAS_AMT;
   15155 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15156 		break;
   15157 	default:
   15158 		break;
   15159 	}
   15160 
   15161 	/* 1: HAS_MANAGE */
   15162 	if (wm_enable_mng_pass_thru(sc) != 0)
   15163 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15164 
    15165 	/*
    15166 	 * Note that the WOL flag is set after the EEPROM stuff has
    15167 	 * been reset.
    15168 	 */
   15169 }
   15170 
    15171 /*
    15172  * Unconfigure Ultra Low Power mode.  Only for PCH_LPT and newer;
    15173  * a few I217/I218 devices on which ULP is not used are excluded below.
    15174  */
   15175 static int
   15176 wm_ulp_disable(struct wm_softc *sc)
   15177 {
   15178 	uint32_t reg;
   15179 	uint16_t phyreg;
   15180 	int i = 0, rv = 0;
   15181 
   15182 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15183 		device_xname(sc->sc_dev), __func__));
   15184 	/* Exclude old devices */
   15185 	if ((sc->sc_type < WM_T_PCH_LPT)
   15186 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15187 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15188 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15189 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15190 		return 0;
   15191 
   15192 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15193 		/* Request ME un-configure ULP mode in the PHY */
   15194 		reg = CSR_READ(sc, WMREG_H2ME);
   15195 		reg &= ~H2ME_ULP;
   15196 		reg |= H2ME_ENFORCE_SETTINGS;
   15197 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15198 
   15199 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15200 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15201 			if (i++ == 30) {
   15202 				device_printf(sc->sc_dev, "%s timed out\n",
   15203 				    __func__);
   15204 				return -1;
   15205 			}
   15206 			delay(10 * 1000);
   15207 		}
   15208 		reg = CSR_READ(sc, WMREG_H2ME);
   15209 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15210 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15211 
   15212 		return 0;
   15213 	}
   15214 
   15215 	/* Acquire semaphore */
   15216 	rv = sc->phy.acquire(sc);
   15217 	if (rv != 0) {
   15218 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
    15219 		    device_xname(sc->sc_dev), __func__));
   15220 		return -1;
   15221 	}
   15222 
   15223 	/* Toggle LANPHYPC */
   15224 	wm_toggle_lanphypc_pch_lpt(sc);
   15225 
   15226 	/* Unforce SMBus mode in PHY */
   15227 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15228 	if (rv != 0) {
   15229 		uint32_t reg2;
   15230 
   15231 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15232 			__func__);
   15233 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15234 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15235 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15236 		delay(50 * 1000);
   15237 
   15238 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15239 		    &phyreg);
   15240 		if (rv != 0)
   15241 			goto release;
   15242 	}
   15243 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15244 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15245 
   15246 	/* Unforce SMBus mode in MAC */
   15247 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15248 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15249 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15250 
   15251 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15252 	if (rv != 0)
   15253 		goto release;
   15254 	phyreg |= HV_PM_CTRL_K1_ENA;
   15255 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15256 
   15257 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15258 		&phyreg);
   15259 	if (rv != 0)
   15260 		goto release;
   15261 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15262 	    | I218_ULP_CONFIG1_STICKY_ULP
   15263 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15264 	    | I218_ULP_CONFIG1_WOL_HOST
   15265 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15266 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15267 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15268 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15269 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15270 	phyreg |= I218_ULP_CONFIG1_START;
   15271 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15272 
   15273 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15274 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15275 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15276 
   15277 release:
   15278 	/* Release semaphore */
   15279 	sc->phy.release(sc);
   15280 	wm_gmii_reset(sc);
   15281 	delay(50 * 1000);
   15282 
   15283 	return rv;
   15284 }
   15285 
   15286 /* WOL in the newer chipset interfaces (pchlan) */
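          /*
           * On PCH and newer parts the PHY rather than the MAC detects wake
           * events while the device sleeps, so the MAC's receive address
           * registers, multicast table and relevant RCTL bits are mirrored
           * into the PHY's BM_WUC_PAGE wakeup registers before WUC/WUFC are
           * armed in both the MAC and the PHY.
           */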
   15287 static int
   15288 wm_enable_phy_wakeup(struct wm_softc *sc)
   15289 {
   15290 	device_t dev = sc->sc_dev;
   15291 	uint32_t mreg, moff;
   15292 	uint16_t wuce, wuc, wufc, preg;
   15293 	int i, rv;
   15294 
   15295 	KASSERT(sc->sc_type >= WM_T_PCH);
   15296 
   15297 	/* Copy MAC RARs to PHY RARs */
   15298 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15299 
   15300 	/* Activate PHY wakeup */
   15301 	rv = sc->phy.acquire(sc);
   15302 	if (rv != 0) {
   15303 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15304 		    __func__);
   15305 		return rv;
   15306 	}
   15307 
   15308 	/*
   15309 	 * Enable access to PHY wakeup registers.
   15310 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15311 	 */
   15312 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15313 	if (rv != 0) {
   15314 		device_printf(dev,
   15315 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15316 		goto release;
   15317 	}
   15318 
   15319 	/* Copy MAC MTA to PHY MTA */
   15320 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15321 		uint16_t lo, hi;
   15322 
   15323 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15324 		lo = (uint16_t)(mreg & 0xffff);
   15325 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15326 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15327 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15328 	}
   15329 
   15330 	/* Configure PHY Rx Control register */
   15331 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15332 	mreg = CSR_READ(sc, WMREG_RCTL);
   15333 	if (mreg & RCTL_UPE)
   15334 		preg |= BM_RCTL_UPE;
   15335 	if (mreg & RCTL_MPE)
   15336 		preg |= BM_RCTL_MPE;
   15337 	preg &= ~(BM_RCTL_MO_MASK);
   15338 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15339 	if (moff != 0)
   15340 		preg |= moff << BM_RCTL_MO_SHIFT;
   15341 	if (mreg & RCTL_BAM)
   15342 		preg |= BM_RCTL_BAM;
   15343 	if (mreg & RCTL_PMCF)
   15344 		preg |= BM_RCTL_PMCF;
   15345 	mreg = CSR_READ(sc, WMREG_CTRL);
   15346 	if (mreg & CTRL_RFCE)
   15347 		preg |= BM_RCTL_RFCE;
   15348 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15349 
   15350 	wuc = WUC_APME | WUC_PME_EN;
   15351 	wufc = WUFC_MAG;
   15352 	/* Enable PHY wakeup in MAC register */
   15353 	CSR_WRITE(sc, WMREG_WUC,
   15354 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15355 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15356 
   15357 	/* Configure and enable PHY wakeup in PHY registers */
   15358 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15359 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15360 
   15361 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15362 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15363 
   15364 release:
   15365 	sc->phy.release(sc);
   15366 
    15367 	return rv;
   15368 }
   15369 
   15370 /* Power down workaround on D3 */
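          /*
           * Before entering D3, gigabit is disabled and the IGP3 PHY's
           * voltage regulator is forced into shutdown.  The VR control write
           * is read back to verify that it stuck; if not, the PHY is reset
           * and the sequence is retried once.
           */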
   15371 static void
   15372 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15373 {
   15374 	uint32_t reg;
   15375 	uint16_t phyreg;
   15376 	int i;
   15377 
   15378 	for (i = 0; i < 2; i++) {
   15379 		/* Disable link */
   15380 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15381 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15382 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15383 
   15384 		/*
   15385 		 * Call gig speed drop workaround on Gig disable before
   15386 		 * accessing any PHY registers
   15387 		 */
   15388 		if (sc->sc_type == WM_T_ICH8)
   15389 			wm_gig_downshift_workaround_ich8lan(sc);
   15390 
   15391 		/* Write VR power-down enable */
   15392 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15393 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15394 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15395 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15396 
   15397 		/* Read it back and test */
   15398 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15399 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15400 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15401 			break;
   15402 
   15403 		/* Issue PHY reset and repeat at most one more time */
   15404 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15405 	}
   15406 }
   15407 
   15408 /*
   15409  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15410  *  @sc: pointer to the HW structure
   15411  *
   15412  *  During S0 to Sx transition, it is possible the link remains at gig
   15413  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15414  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15415  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15416  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15417  *  needs to be written.
   15418  *  Parts that support (and are linked to a partner which support) EEE in
   15419  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15420  *  than 10Mbps w/o EEE.
   15421  */
   15422 static void
   15423 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15424 {
   15425 	device_t dev = sc->sc_dev;
   15426 	struct ethercom *ec = &sc->sc_ethercom;
   15427 	uint32_t phy_ctrl;
   15428 	int rv;
   15429 
   15430 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15431 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15432 
   15433 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15434 
   15435 	if (sc->sc_phytype == WMPHY_I217) {
   15436 		uint16_t devid = sc->sc_pcidevid;
   15437 
   15438 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15439 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15440 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15441 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15442 		    (sc->sc_type >= WM_T_PCH_SPT))
   15443 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15444 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15445 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15446 
   15447 		if (sc->phy.acquire(sc) != 0)
   15448 			goto out;
   15449 
   15450 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15451 			uint16_t eee_advert;
   15452 
   15453 			rv = wm_read_emi_reg_locked(dev,
   15454 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15455 			if (rv)
   15456 				goto release;
   15457 
   15458 			/*
   15459 			 * Disable LPLU if both link partners support 100BaseT
   15460 			 * EEE and 100Full is advertised on both ends of the
   15461 			 * link, and enable Auto Enable LPI since there will
   15462 			 * be no driver to enable LPI while in Sx.
   15463 			 */
   15464 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15465 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15466 				uint16_t anar, phy_reg;
   15467 
   15468 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15469 				    &anar);
   15470 				if (anar & ANAR_TX_FD) {
   15471 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15472 					    PHY_CTRL_NOND0A_LPLU);
   15473 
   15474 					/* Set Auto Enable LPI after link up */
   15475 					sc->phy.readreg_locked(dev, 2,
   15476 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15477 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15478 					sc->phy.writereg_locked(dev, 2,
   15479 					    I217_LPI_GPIO_CTRL, phy_reg);
   15480 				}
   15481 			}
   15482 		}
   15483 
   15484 		/*
   15485 		 * For i217 Intel Rapid Start Technology support,
   15486 		 * when the system is going into Sx and no manageability engine
   15487 		 * is present, the driver must configure proxy to reset only on
   15488 		 * power good.	LPI (Low Power Idle) state must also reset only
   15489 		 * on power good, as well as the MTA (Multicast table array).
   15490 		 * The SMBus release must also be disabled on LCD reset.
   15491 		 */
   15492 
   15493 		/*
   15494 		 * Enable MTA to reset for Intel Rapid Start Technology
   15495 		 * Support
   15496 		 */
   15497 
   15498 release:
   15499 		sc->phy.release(sc);
   15500 	}
   15501 out:
   15502 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15503 
   15504 	if (sc->sc_type == WM_T_ICH8)
   15505 		wm_gig_downshift_workaround_ich8lan(sc);
   15506 
   15507 	if (sc->sc_type >= WM_T_PCH) {
   15508 		wm_oem_bits_config_ich8lan(sc, false);
   15509 
   15510 		/* Reset PHY to activate OEM bits on 82577/8 */
   15511 		if (sc->sc_type == WM_T_PCH)
   15512 			wm_reset_phy(sc);
   15513 
   15514 		if (sc->phy.acquire(sc) != 0)
   15515 			return;
   15516 		wm_write_smbus_addr(sc);
   15517 		sc->phy.release(sc);
   15518 	}
   15519 }
   15520 
   15521 /*
   15522  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15523  *  @sc: pointer to the HW structure
   15524  *
   15525  *  During Sx to S0 transitions on non-managed devices or managed devices
   15526  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15527  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15528  *  the PHY.
   15529  *  On i217, setup Intel Rapid Start Technology.
   15530  */
   15531 static int
   15532 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15533 {
   15534 	device_t dev = sc->sc_dev;
   15535 	int rv;
   15536 
   15537 	if (sc->sc_type < WM_T_PCH2)
   15538 		return 0;
   15539 
   15540 	rv = wm_init_phy_workarounds_pchlan(sc);
   15541 	if (rv != 0)
   15542 		return -1;
   15543 
   15544 	/* For i217 Intel Rapid Start Technology support when the system
   15545 	 * is transitioning from Sx and no manageability engine is present
   15546 	 * configure SMBus to restore on reset, disable proxy, and enable
   15547 	 * the reset on MTA (Multicast table array).
   15548 	 */
   15549 	if (sc->sc_phytype == WMPHY_I217) {
   15550 		uint16_t phy_reg;
   15551 
   15552 		if (sc->phy.acquire(sc) != 0)
   15553 			return -1;
   15554 
   15555 		/* Clear Auto Enable LPI after link up */
   15556 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15557 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15558 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15559 
   15560 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15561 			/* Restore clear on SMB if no manageability engine
   15562 			 * is present
   15563 			 */
   15564 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15565 			    &phy_reg);
   15566 			if (rv != 0)
   15567 				goto release;
   15568 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15569 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15570 
   15571 			/* Disable Proxy */
   15572 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15573 		}
   15574 		/* Enable reset on MTA */
    15575 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15576 		if (rv != 0)
   15577 			goto release;
   15578 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15579 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15580 
   15581 release:
   15582 		sc->phy.release(sc);
   15583 		return rv;
   15584 	}
   15585 
   15586 	return 0;
   15587 }
   15588 
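          /*
           * Arm the adapter for wake-on-LAN: advertise the wake capability,
           * apply the ICH/PCH suspend workarounds, program WUC/WUFC (via the
           * PHY on PCH and newer parts, directly in the MAC otherwise), and
           * finally set PME_EN in the PCI PMCSR.  PCI_PMCSR_PME_STS is
           * write-one-to-clear, so writing it back here clears any stale PME
           * status before PME is (possibly) enabled.
           */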
   15589 static void
   15590 wm_enable_wakeup(struct wm_softc *sc)
   15591 {
   15592 	uint32_t reg, pmreg;
   15593 	pcireg_t pmode;
   15594 	int rv = 0;
   15595 
   15596 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15597 		device_xname(sc->sc_dev), __func__));
   15598 
   15599 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15600 	    &pmreg, NULL) == 0)
   15601 		return;
   15602 
   15603 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15604 		goto pme;
   15605 
   15606 	/* Advertise the wakeup capability */
   15607 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15608 	    | CTRL_SWDPIN(3));
   15609 
   15610 	/* Keep the laser running on fiber adapters */
   15611 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15612 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15613 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15614 		reg |= CTRL_EXT_SWDPIN(3);
   15615 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15616 	}
   15617 
   15618 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15619 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15620 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15621 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15622 		wm_suspend_workarounds_ich8lan(sc);
   15623 
   15624 #if 0	/* For the multicast packet */
   15625 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15626 	reg |= WUFC_MC;
   15627 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15628 #endif
   15629 
   15630 	if (sc->sc_type >= WM_T_PCH) {
   15631 		rv = wm_enable_phy_wakeup(sc);
   15632 		if (rv != 0)
   15633 			goto pme;
   15634 	} else {
   15635 		/* Enable wakeup by the MAC */
   15636 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15637 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15638 	}
   15639 
   15640 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15641 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15642 		|| (sc->sc_type == WM_T_PCH2))
   15643 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15644 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15645 
   15646 pme:
   15647 	/* Request PME */
   15648 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15649 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15650 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15651 		/* For WOL */
   15652 		pmode |= PCI_PMCSR_PME_EN;
   15653 	} else {
   15654 		/* Disable WOL */
   15655 		pmode &= ~PCI_PMCSR_PME_EN;
   15656 	}
   15657 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15658 }
   15659 
   15660 /* Disable ASPM L0s and/or L1 for workaround */
   15661 static void
   15662 wm_disable_aspm(struct wm_softc *sc)
   15663 {
   15664 	pcireg_t reg, mask = 0;
    15665 	const char *str = "";
    15666 
    15667 	/*
    15668 	 * Only for PCIe devices that have the PCIe capability in the PCI
    15669 	 * config space.
    15670 	 */
   15671 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15672 		return;
   15673 
   15674 	switch (sc->sc_type) {
   15675 	case WM_T_82571:
   15676 	case WM_T_82572:
   15677 		/*
   15678 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15679 		 * State Power management L1 State (ASPM L1).
   15680 		 */
   15681 		mask = PCIE_LCSR_ASPM_L1;
   15682 		str = "L1 is";
   15683 		break;
   15684 	case WM_T_82573:
   15685 	case WM_T_82574:
   15686 	case WM_T_82583:
   15687 		/*
   15688 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15689 		 *
    15690 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15691 		 * some chipsets.  The 82574 and 82583 documents say that
    15692 		 * disabling L0s on those specific chipsets is sufficient,
    15693 		 * but we follow what the Intel em driver does.
   15694 		 *
   15695 		 * References:
   15696 		 * Errata 8 of the Specification Update of i82573.
   15697 		 * Errata 20 of the Specification Update of i82574.
   15698 		 * Errata 9 of the Specification Update of i82583.
   15699 		 */
   15700 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15701 		str = "L0s and L1 are";
   15702 		break;
   15703 	default:
   15704 		return;
   15705 	}
   15706 
   15707 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15708 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15709 	reg &= ~mask;
   15710 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15711 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15712 
   15713 	/* Print only in wm_attach() */
   15714 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15715 		aprint_verbose_dev(sc->sc_dev,
   15716 		    "ASPM %s disabled to workaround the errata.\n", str);
   15717 }
   15718 
   15719 /* LPLU */
   15720 
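          /*
           * LPLU (Low Power Link Up) makes the PHY negotiate the lowest
           * usable speed to save power.  Disable it for the D0 (fully
           * powered) state so that the link may run at full speed while the
           * interface is up; where the control bits live depends on the
           * MAC/PHY generation, hence the switch below.
           */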
   15721 static void
   15722 wm_lplu_d0_disable(struct wm_softc *sc)
   15723 {
   15724 	struct mii_data *mii = &sc->sc_mii;
   15725 	uint32_t reg;
   15726 	uint16_t phyval;
   15727 
   15728 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15729 		device_xname(sc->sc_dev), __func__));
   15730 
   15731 	if (sc->sc_phytype == WMPHY_IFE)
   15732 		return;
   15733 
   15734 	switch (sc->sc_type) {
   15735 	case WM_T_82571:
   15736 	case WM_T_82572:
   15737 	case WM_T_82573:
   15738 	case WM_T_82575:
   15739 	case WM_T_82576:
   15740 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15741 		phyval &= ~PMR_D0_LPLU;
   15742 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15743 		break;
   15744 	case WM_T_82580:
   15745 	case WM_T_I350:
   15746 	case WM_T_I210:
   15747 	case WM_T_I211:
   15748 		reg = CSR_READ(sc, WMREG_PHPM);
   15749 		reg &= ~PHPM_D0A_LPLU;
   15750 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15751 		break;
   15752 	case WM_T_82574:
   15753 	case WM_T_82583:
   15754 	case WM_T_ICH8:
   15755 	case WM_T_ICH9:
   15756 	case WM_T_ICH10:
   15757 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15758 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15759 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15760 		CSR_WRITE_FLUSH(sc);
   15761 		break;
   15762 	case WM_T_PCH:
   15763 	case WM_T_PCH2:
   15764 	case WM_T_PCH_LPT:
   15765 	case WM_T_PCH_SPT:
   15766 	case WM_T_PCH_CNP:
   15767 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15768 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15769 		if (wm_phy_resetisblocked(sc) == false)
   15770 			phyval |= HV_OEM_BITS_ANEGNOW;
   15771 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15772 		break;
   15773 	default:
   15774 		break;
   15775 	}
   15776 }
   15777 
   15778 /* EEE */
   15779 
   15780 static int
   15781 wm_set_eee_i350(struct wm_softc *sc)
   15782 {
   15783 	struct ethercom *ec = &sc->sc_ethercom;
   15784 	uint32_t ipcnfg, eeer;
   15785 	uint32_t ipcnfg_mask
   15786 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15787 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15788 
   15789 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15790 
   15791 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15792 	eeer = CSR_READ(sc, WMREG_EEER);
   15793 
   15794 	/* Enable or disable per user setting */
   15795 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15796 		ipcnfg |= ipcnfg_mask;
   15797 		eeer |= eeer_mask;
   15798 	} else {
   15799 		ipcnfg &= ~ipcnfg_mask;
   15800 		eeer &= ~eeer_mask;
   15801 	}
   15802 
   15803 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15804 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15805 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15806 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15807 
   15808 	return 0;
   15809 }
   15810 
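          /*
           * On PCH-generation PHYs the EEE controls live in PHY EMI
           * registers rather than MAC registers: the link partner's ability
           * is checked against our advertisement and LPI is enabled per
           * speed in I82579_LPI_CTRL, with 100Mb/s EEE additionally
           * requiring that the partner advertise full duplex.
           */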
   15811 static int
   15812 wm_set_eee_pchlan(struct wm_softc *sc)
   15813 {
   15814 	device_t dev = sc->sc_dev;
   15815 	struct ethercom *ec = &sc->sc_ethercom;
   15816 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15817 	int rv = 0;
   15818 
   15819 	switch (sc->sc_phytype) {
   15820 	case WMPHY_82579:
   15821 		lpa = I82579_EEE_LP_ABILITY;
   15822 		pcs_status = I82579_EEE_PCS_STATUS;
   15823 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15824 		break;
   15825 	case WMPHY_I217:
   15826 		lpa = I217_EEE_LP_ABILITY;
   15827 		pcs_status = I217_EEE_PCS_STATUS;
   15828 		adv_addr = I217_EEE_ADVERTISEMENT;
   15829 		break;
   15830 	default:
   15831 		return 0;
   15832 	}
   15833 
   15834 	if (sc->phy.acquire(sc)) {
   15835 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15836 		return 0;
   15837 	}
   15838 
   15839 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15840 	if (rv != 0)
   15841 		goto release;
   15842 
   15843 	/* Clear bits that enable EEE in various speeds */
   15844 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15845 
   15846 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15847 		/* Save off link partner's EEE ability */
   15848 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15849 		if (rv != 0)
   15850 			goto release;
   15851 
   15852 		/* Read EEE advertisement */
   15853 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15854 			goto release;
   15855 
   15856 		/*
   15857 		 * Enable EEE only for speeds in which the link partner is
   15858 		 * EEE capable and for which we advertise EEE.
   15859 		 */
   15860 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15861 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15862 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15863 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15864 			if ((data & ANLPAR_TX_FD) != 0)
   15865 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15866 			else {
   15867 				/*
   15868 				 * EEE is not supported in 100Half, so ignore
   15869 				 * partner's EEE in 100 ability if full-duplex
   15870 				 * is not advertised.
   15871 				 */
   15872 				sc->eee_lp_ability
   15873 				    &= ~AN_EEEADVERT_100_TX;
   15874 			}
   15875 		}
   15876 	}
   15877 
   15878 	if (sc->sc_phytype == WMPHY_82579) {
   15879 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15880 		if (rv != 0)
   15881 			goto release;
   15882 
   15883 		data &= ~I82579_LPI_PLL_SHUT_100;
   15884 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15885 	}
   15886 
   15887 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15888 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15889 		goto release;
   15890 
   15891 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15892 release:
   15893 	sc->phy.release(sc);
   15894 
   15895 	return rv;
   15896 }
   15897 
   15898 static int
   15899 wm_set_eee(struct wm_softc *sc)
   15900 {
   15901 	struct ethercom *ec = &sc->sc_ethercom;
   15902 
   15903 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15904 		return 0;
   15905 
   15906 	if (sc->sc_type == WM_T_I354) {
   15907 		/* I354 uses an external PHY */
   15908 		return 0; /* not yet */
   15909 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15910 		return wm_set_eee_i350(sc);
   15911 	else if (sc->sc_type >= WM_T_PCH2)
   15912 		return wm_set_eee_pchlan(sc);
   15913 
   15914 	return 0;
   15915 }
   15916 
   15917 /*
   15918  * Workarounds (mainly PHY related).
   15919  * Basically, PHY's workarounds are in the PHY drivers.
   15920  */
   15921 
   15922 /* Work-around for 82566 Kumeran PCS lock loss */
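          /*
           * The 82566 (ICH8 with IGP3 PHY) can lose the Kumeran PCS lock at
           * 1Gb/s.  The loop below samples IGP3_KMRN_DIAG twice per
           * iteration and resets the PHY between attempts; if the lock is
           * still lost after ten resets, gigabit operation is disabled
           * altogether.
           */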
   15923 static int
   15924 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15925 {
   15926 	struct mii_data *mii = &sc->sc_mii;
   15927 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15928 	int i, reg, rv;
   15929 	uint16_t phyreg;
   15930 
   15931 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15932 		device_xname(sc->sc_dev), __func__));
   15933 
   15934 	/* If the link is not up, do nothing */
   15935 	if ((status & STATUS_LU) == 0)
   15936 		return 0;
   15937 
   15938 	/* Nothing to do if the link is other than 1Gbps */
   15939 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15940 		return 0;
   15941 
   15942 	for (i = 0; i < 10; i++) {
   15943 		/* read twice */
   15944 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15945 		if (rv != 0)
   15946 			return rv;
   15947 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15948 		if (rv != 0)
   15949 			return rv;
   15950 
   15951 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15952 			goto out;	/* GOOD! */
   15953 
   15954 		/* Reset the PHY */
   15955 		wm_reset_phy(sc);
   15956 		delay(5*1000);
   15957 	}
   15958 
   15959 	/* Disable GigE link negotiation */
   15960 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15961 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15962 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15963 
   15964 	/*
   15965 	 * Call gig speed drop workaround on Gig disable before accessing
   15966 	 * any PHY registers.
   15967 	 */
   15968 	wm_gig_downshift_workaround_ich8lan(sc);
   15969 
   15970 out:
   15971 	return 0;
   15972 }
   15973 
   15974 /*
   15975  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15976  *  @sc: pointer to the HW structure
   15977  *
   15978  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15979  *  LPLU, Gig disable, MDIC PHY reset):
   15980  *    1) Set Kumeran Near-end loopback
   15981  *    2) Clear Kumeran Near-end loopback
   15982  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15983  */
   15984 static void
   15985 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15986 {
   15987 	uint16_t kmreg;
   15988 
   15989 	/* Only for igp3 */
   15990 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15991 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15992 			return;
   15993 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15994 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15995 			return;
   15996 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15997 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15998 	}
   15999 }
   16000 
   16001 /*
   16002  * Workaround for pch's PHYs
   16003  * XXX should be moved to new PHY driver?
   16004  */
   16005 static int
   16006 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16007 {
   16008 	device_t dev = sc->sc_dev;
   16009 	struct mii_data *mii = &sc->sc_mii;
   16010 	struct mii_softc *child;
   16011 	uint16_t phy_data, phyrev = 0;
   16012 	int phytype = sc->sc_phytype;
   16013 	int rv;
   16014 
   16015 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16016 		device_xname(dev), __func__));
   16017 	KASSERT(sc->sc_type == WM_T_PCH);
   16018 
   16019 	/* Set MDIO slow mode before any other MDIO access */
   16020 	if (phytype == WMPHY_82577)
   16021 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16022 			return rv;
   16023 
   16024 	child = LIST_FIRST(&mii->mii_phys);
   16025 	if (child != NULL)
   16026 		phyrev = child->mii_mpd_rev;
   16027 
    16028 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16029 	if ((child != NULL) &&
   16030 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16031 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16032 		/* Disable generation of early preamble (0x4431) */
   16033 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16034 		    &phy_data);
   16035 		if (rv != 0)
   16036 			return rv;
   16037 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16038 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16039 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16040 		    phy_data);
   16041 		if (rv != 0)
   16042 			return rv;
   16043 
   16044 		/* Preamble tuning for SSC */
   16045 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16046 		if (rv != 0)
   16047 			return rv;
   16048 	}
   16049 
   16050 	/* 82578 */
   16051 	if (phytype == WMPHY_82578) {
   16052 		/*
   16053 		 * Return registers to default by doing a soft reset then
   16054 		 * writing 0x3140 to the control register
   16055 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16056 		 */
   16057 		if ((child != NULL) && (phyrev < 2)) {
   16058 			PHY_RESET(child);
   16059 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16060 			if (rv != 0)
   16061 				return rv;
   16062 		}
   16063 	}
   16064 
   16065 	/* Select page 0 */
   16066 	if ((rv = sc->phy.acquire(sc)) != 0)
   16067 		return rv;
   16068 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16069 	sc->phy.release(sc);
   16070 	if (rv != 0)
   16071 		return rv;
   16072 
   16073 	/*
   16074 	 * Configure the K1 Si workaround during phy reset assuming there is
   16075 	 * link so that it disables K1 if link is in 1Gbps.
   16076 	 */
   16077 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16078 		return rv;
   16079 
   16080 	/* Workaround for link disconnects on a busy hub in half duplex */
   16081 	rv = sc->phy.acquire(sc);
   16082 	if (rv)
   16083 		return rv;
   16084 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16085 	if (rv)
   16086 		goto release;
   16087 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16088 	    phy_data & 0x00ff);
   16089 	if (rv)
   16090 		goto release;
   16091 
   16092 	/* Set MSE higher to enable link to stay up when noise is high */
   16093 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16094 release:
   16095 	sc->phy.release(sc);
   16096 
   16097 	return rv;
   16098 }
   16099 
   16100 /*
   16101  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16102  *  @sc:   pointer to the HW structure
   16103  */
   16104 static void
   16105 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16106 {
   16107 
   16108 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16109 		device_xname(sc->sc_dev), __func__));
   16110 
   16111 	if (sc->phy.acquire(sc) != 0)
   16112 		return;
   16113 
   16114 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16115 
   16116 	sc->phy.release(sc);
   16117 }
   16118 
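          /*
           * Each 48-bit receive address and its RAL_AV (address valid) flag
           * are split into four 16-bit chunks to fit the PHY's wakeup-page
           * registers: RAL maps to BM_RAR_L/BM_RAR_M, and RAH (including the
           * valid bit) maps to BM_RAR_H/BM_RAR_CTRL.
           */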
   16119 static void
   16120 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16121 {
   16122 	device_t dev = sc->sc_dev;
   16123 	uint32_t mac_reg;
   16124 	uint16_t i, wuce;
   16125 	int count;
   16126 
   16127 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16128 		device_xname(dev), __func__));
   16129 
   16130 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16131 		return;
   16132 
   16133 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16134 	count = wm_rar_count(sc);
   16135 	for (i = 0; i < count; i++) {
   16136 		uint16_t lo, hi;
   16137 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16138 		lo = (uint16_t)(mac_reg & 0xffff);
   16139 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16140 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16141 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16142 
   16143 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16144 		lo = (uint16_t)(mac_reg & 0xffff);
   16145 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16146 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16147 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16148 	}
   16149 
   16150 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16151 }
   16152 
   16153 /*
   16154  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16155  *  with 82579 PHY
   16156  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16157  */
   16158 static int
   16159 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16160 {
   16161 	device_t dev = sc->sc_dev;
   16162 	int rar_count;
   16163 	int rv;
   16164 	uint32_t mac_reg;
   16165 	uint16_t dft_ctrl, data;
   16166 	uint16_t i;
   16167 
   16168 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16169 		device_xname(dev), __func__));
   16170 
   16171 	if (sc->sc_type < WM_T_PCH2)
   16172 		return 0;
   16173 
   16174 	/* Acquire PHY semaphore */
   16175 	rv = sc->phy.acquire(sc);
   16176 	if (rv != 0)
   16177 		return rv;
   16178 
   16179 	/* Disable Rx path while enabling/disabling workaround */
   16180 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16181 	if (rv != 0)
   16182 		goto out;
   16183 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16184 	    dft_ctrl | (1 << 14));
   16185 	if (rv != 0)
   16186 		goto out;
   16187 
   16188 	if (enable) {
   16189 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16190 		 * SHRAL/H) and initial CRC values to the MAC
   16191 		 */
   16192 		rar_count = wm_rar_count(sc);
   16193 		for (i = 0; i < rar_count; i++) {
   16194 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16195 			uint32_t addr_high, addr_low;
   16196 
   16197 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16198 			if (!(addr_high & RAL_AV))
   16199 				continue;
   16200 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16201 			mac_addr[0] = (addr_low & 0xFF);
   16202 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16203 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16204 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16205 			mac_addr[4] = (addr_high & 0xFF);
   16206 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16207 
   16208 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16209 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16210 		}
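		/*
		 * The loop above seeds each PCH_RAICC register with the
		 * bitwise complement of the little-endian CRC32 of the
		 * corresponding station address; FreeBSD and Linux program
		 * the same value for this workaround.
		 */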
   16211 
   16212 		/* Write Rx addresses to the PHY */
   16213 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16214 	}
   16215 
   16216 	/*
   16217 	 * If enable ==
   16218 	 *	true: Enable jumbo frame workaround in the MAC.
   16219 	 *	false: Write MAC register values back to h/w defaults.
   16220 	 */
   16221 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16222 	if (enable) {
   16223 		mac_reg &= ~(1 << 14);
   16224 		mac_reg |= (7 << 15);
   16225 	} else
   16226 		mac_reg &= ~(0xf << 14);
   16227 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
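	/*
	 * Bit sketch of the FFLT_DBG update above (read off the masks used
	 * here, not from a datasheet): enable clears bit 14 and sets bits
	 * 17:15 to 7; disable clears bits 17:14, i.e. restores the default.
	 */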
   16228 
   16229 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16230 	if (enable) {
   16231 		mac_reg |= RCTL_SECRC;
   16232 		sc->sc_rctl |= RCTL_SECRC;
   16233 		sc->sc_flags |= WM_F_CRC_STRIP;
   16234 	} else {
   16235 		mac_reg &= ~RCTL_SECRC;
   16236 		sc->sc_rctl &= ~RCTL_SECRC;
   16237 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16238 	}
   16239 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16240 
   16241 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16242 	if (rv != 0)
   16243 		goto out;
   16244 	if (enable)
   16245 		data |= 1 << 0;
   16246 	else
   16247 		data &= ~(1 << 0);
   16248 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16249 	if (rv != 0)
   16250 		goto out;
   16251 
   16252 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16253 	if (rv != 0)
   16254 		goto out;
   16255 	/*
    16256 	 * XXX FreeBSD and Linux do the same thing here: they write the same
    16257 	 * value in both the enable case and the disable case. Is that correct?
   16258 	 */
   16259 	data &= ~(0xf << 8);
   16260 	data |= (0xb << 8);
   16261 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16262 	if (rv != 0)
   16263 		goto out;
   16264 
   16265 	/*
   16266 	 * If enable ==
   16267 	 *	true: Enable jumbo frame workaround in the PHY.
   16268 	 *	false: Write PHY register values back to h/w defaults.
   16269 	 */
   16270 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16271 	if (rv != 0)
   16272 		goto out;
   16273 	data &= ~(0x7F << 5);
   16274 	if (enable)
   16275 		data |= (0x37 << 5);
   16276 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16277 	if (rv != 0)
   16278 		goto out;
   16279 
   16280 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16281 	if (rv != 0)
   16282 		goto out;
   16283 	if (enable)
   16284 		data &= ~(1 << 13);
   16285 	else
   16286 		data |= (1 << 13);
   16287 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16288 	if (rv != 0)
   16289 		goto out;
   16290 
   16291 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16292 	if (rv != 0)
   16293 		goto out;
   16294 	data &= ~(0x3FF << 2);
   16295 	if (enable)
   16296 		data |= (I82579_TX_PTR_GAP << 2);
   16297 	else
   16298 		data |= (0x8 << 2);
   16299 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16300 	if (rv != 0)
   16301 		goto out;
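	/*
	 * In the I82579_UNKNOWN1 write above, bits 11:2 form a gap field
	 * (inferred from the I82579_TX_PTR_GAP name and the 0x3FF mask);
	 * 0x8 appears to be the hardware default used on the disable path.
	 */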
   16302 
   16303 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16304 	    enable ? 0xf100 : 0x7e00);
   16305 	if (rv != 0)
   16306 		goto out;
   16307 
   16308 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16309 	if (rv != 0)
   16310 		goto out;
   16311 	if (enable)
   16312 		data |= 1 << 10;
   16313 	else
   16314 		data &= ~(1 << 10);
   16315 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16316 	if (rv != 0)
   16317 		goto out;
   16318 
   16319 	/* Re-enable Rx path after enabling/disabling workaround */
   16320 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16321 	    dft_ctrl & ~(1 << 14));
   16322 
   16323 out:
   16324 	sc->phy.release(sc);
   16325 
   16326 	return rv;
   16327 }
   16328 
   16329 /*
   16330  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16331  *  done after every PHY reset.
   16332  */
   16333 static int
   16334 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16335 {
   16336 	device_t dev = sc->sc_dev;
   16337 	int rv;
   16338 
   16339 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16340 		device_xname(dev), __func__));
   16341 	KASSERT(sc->sc_type == WM_T_PCH2);
   16342 
   16343 	/* Set MDIO slow mode before any other MDIO access */
   16344 	rv = wm_set_mdio_slow_mode_hv(sc);
   16345 	if (rv != 0)
   16346 		return rv;
   16347 
   16348 	rv = sc->phy.acquire(sc);
   16349 	if (rv != 0)
   16350 		return rv;
   16351 	/* Set MSE higher to enable link to stay up when noise is high */
   16352 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16353 	if (rv != 0)
   16354 		goto release;
    16355 	/* Drop the link after the MSE threshold has been reached 5 times */
   16356 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16357 release:
   16358 	sc->phy.release(sc);
   16359 
   16360 	return rv;
   16361 }
   16362 
   16363 /**
   16364  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16365  *  @link: link up bool flag
   16366  *
   16367  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
    16368  *  preventing further DMA write requests.  Work around the issue by disabling
    16369  *  the de-assertion of the clock request when in 1Gbps mode.
   16370  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16371  *  speeds in order to avoid Tx hangs.
   16372  **/
   16373 static int
   16374 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16375 {
   16376 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16377 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16378 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16379 	uint16_t phyreg;
   16380 
   16381 	if (link && (speed == STATUS_SPEED_1000)) {
		/* Bail out if the PHY semaphore cannot be acquired */
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
   16385 		if (rv != 0)
   16386 			goto release;
   16387 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16388 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16389 		if (rv != 0)
   16390 			goto release;
   16391 		delay(20);
   16392 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16393 
   16394 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16395 		    &phyreg);
   16396 release:
   16397 		sc->phy.release(sc);
   16398 		return rv;
   16399 	}
   16400 
   16401 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16402 
   16403 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16404 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16405 	    || !link
   16406 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16407 		goto update_fextnvm6;
   16408 
   16409 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16410 
   16411 	/* Clear link status transmit timeout */
   16412 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16413 	if (speed == STATUS_SPEED_100) {
   16414 		/* Set inband Tx timeout to 5x10us for 100Half */
   16415 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16416 
   16417 		/* Do not extend the K1 entry latency for 100Half */
   16418 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16419 	} else {
   16420 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16421 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16422 
   16423 		/* Extend the K1 entry latency for 10 Mbps */
   16424 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16425 	}
   16426 
   16427 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16428 
   16429 update_fextnvm6:
   16430 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16431 	return 0;
   16432 }
   16433 
   16434 /*
   16435  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16436  *  @sc:   pointer to the HW structure
   16437  *  @link: link up bool flag
   16438  *
   16439  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    16440  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
    16441  *  If link is down, the function will restore the default K1 setting located
   16442  *  in the NVM.
   16443  */
   16444 static int
   16445 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16446 {
   16447 	int k1_enable = sc->sc_nvm_k1_enabled;
   16448 
   16449 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16450 		device_xname(sc->sc_dev), __func__));
   16451 
   16452 	if (sc->phy.acquire(sc) != 0)
   16453 		return -1;
   16454 
   16455 	if (link) {
   16456 		k1_enable = 0;
   16457 
   16458 		/* Link stall fix for link up */
   16459 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16460 		    0x0100);
   16461 	} else {
   16462 		/* Link stall fix for link down */
   16463 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16464 		    0x4100);
   16465 	}
   16466 
   16467 	wm_configure_k1_ich8lan(sc, k1_enable);
   16468 	sc->phy.release(sc);
   16469 
   16470 	return 0;
   16471 }
   16472 
   16473 /*
   16474  *  wm_k1_workaround_lv - K1 Si workaround
   16475  *  @sc:   pointer to the HW structure
   16476  *
    16477  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
    16478  *  Disable K1 for the 1000 and 100 speeds.
   16479  */
   16480 static int
   16481 wm_k1_workaround_lv(struct wm_softc *sc)
   16482 {
   16483 	uint32_t reg;
   16484 	uint16_t phyreg;
   16485 	int rv;
   16486 
   16487 	if (sc->sc_type != WM_T_PCH2)
   16488 		return 0;
   16489 
   16490 	/* Set K1 beacon duration based on 10Mbps speed */
   16491 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16492 	if (rv != 0)
   16493 		return rv;
   16494 
   16495 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16496 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16497 		if (phyreg &
   16498 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    16499 			/* LV 1G/100 packet drop issue workaround */
   16500 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16501 			    &phyreg);
   16502 			if (rv != 0)
   16503 				return rv;
   16504 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16505 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16506 			    phyreg);
   16507 			if (rv != 0)
   16508 				return rv;
   16509 		} else {
   16510 			/* For 10Mbps */
   16511 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16512 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16513 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16514 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16515 		}
   16516 	}
   16517 
   16518 	return 0;
   16519 }
   16520 
   16521 /*
   16522  *  wm_link_stall_workaround_hv - Si workaround
   16523  *  @sc: pointer to the HW structure
   16524  *
   16525  *  This function works around a Si bug where the link partner can get
   16526  *  a link up indication before the PHY does. If small packets are sent
    16527  *  by the link partner, they can be placed in the packet buffer without
    16528  *  being properly accounted for by the PHY, which then stalls, preventing
   16529  *  further packets from being received.  The workaround is to clear the
   16530  *  packet buffer after the PHY detects link up.
   16531  */
   16532 static int
   16533 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16534 {
   16535 	uint16_t phyreg;
   16536 
   16537 	if (sc->sc_phytype != WMPHY_82578)
   16538 		return 0;
   16539 
    16540 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16541 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16542 	if ((phyreg & BMCR_LOOP) != 0)
   16543 		return 0;
   16544 
   16545 	/* Check if link is up and at 1Gbps */
   16546 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16547 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16548 	    | BM_CS_STATUS_SPEED_MASK;
   16549 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16550 		| BM_CS_STATUS_SPEED_1000))
   16551 		return 0;
   16552 
   16553 	delay(200 * 1000);	/* XXX too big */
   16554 
   16555 	/* Flush the packets in the fifo buffer */
   16556 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16557 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16558 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16559 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16560 
   16561 	return 0;
   16562 }
   16563 
   16564 static int
   16565 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16566 {
   16567 	int rv;
   16568 	uint16_t reg;
   16569 
   16570 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16571 	if (rv != 0)
   16572 		return rv;
   16573 
   16574 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16575 	    reg | HV_KMRN_MDIO_SLOW);
   16576 }
   16577 
   16578 /*
   16579  *  wm_configure_k1_ich8lan - Configure K1 power state
   16580  *  @sc: pointer to the HW structure
   16581  *  @enable: K1 state to configure
   16582  *
   16583  *  Configure the K1 power state based on the provided parameter.
   16584  *  Assumes semaphore already acquired.
   16585  */
   16586 static void
   16587 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16588 {
   16589 	uint32_t ctrl, ctrl_ext, tmp;
   16590 	uint16_t kmreg;
   16591 	int rv;
   16592 
   16593 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16594 
   16595 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16596 	if (rv != 0)
   16597 		return;
   16598 
   16599 	if (k1_enable)
   16600 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16601 	else
   16602 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16603 
   16604 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16605 	if (rv != 0)
   16606 		return;
   16607 
   16608 	delay(20);
   16609 
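	/*
	 * The sequence below briefly forces the MAC speed (CTRL_FRCSPD plus
	 * CTRL_EXT_SPD_BYPS) and then restores the original CTRL/CTRL_EXT
	 * values, apparently so that the new K1 setting is latched.  The
	 * rationale is an assumption carried over from the reference
	 * drivers; it is not stated in this file.
	 */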
   16610 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16611 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16612 
   16613 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16614 	tmp |= CTRL_FRCSPD;
   16615 
   16616 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16617 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16618 	CSR_WRITE_FLUSH(sc);
   16619 	delay(20);
   16620 
   16621 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16622 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16623 	CSR_WRITE_FLUSH(sc);
   16624 	delay(20);
   16627 }
   16628 
   16629 /* special case - for 82575 - need to do manual init ... */
   16630 static void
   16631 wm_reset_init_script_82575(struct wm_softc *sc)
   16632 {
   16633 	/*
    16634 	 * Remark: this is untested code - we have no board without EEPROM.
    16635 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   16636 	 */
   16637 
   16638 	/* SerDes configuration via SERDESCTRL */
   16639 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16640 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16641 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16642 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16643 
   16644 	/* CCM configuration via CCMCTL register */
   16645 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16646 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16647 
   16648 	/* PCIe lanes configuration */
   16649 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16650 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16651 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16652 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16653 
   16654 	/* PCIe PLL Configuration */
   16655 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16656 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16657 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16658 }
   16659 
   16660 static void
   16661 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16662 {
   16663 	uint32_t reg;
   16664 	uint16_t nvmword;
   16665 	int rv;
   16666 
   16667 	if (sc->sc_type != WM_T_82580)
   16668 		return;
   16669 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16670 		return;
   16671 
   16672 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16673 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16674 	if (rv != 0) {
   16675 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16676 		    __func__);
   16677 		return;
   16678 	}
   16679 
   16680 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16681 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16682 		reg |= MDICNFG_DEST;
   16683 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16684 		reg |= MDICNFG_COM_MDIO;
   16685 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16686 }
   16687 
   16688 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16689 
   16690 static bool
   16691 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16692 {
   16693 	uint32_t reg;
   16694 	uint16_t id1, id2;
   16695 	int i, rv;
   16696 
   16697 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16698 		device_xname(sc->sc_dev), __func__));
   16699 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16700 
   16701 	id1 = id2 = 0xffff;
   16702 	for (i = 0; i < 2; i++) {
   16703 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16704 		    &id1);
   16705 		if ((rv != 0) || MII_INVALIDID(id1))
   16706 			continue;
   16707 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16708 		    &id2);
   16709 		if ((rv != 0) || MII_INVALIDID(id2))
   16710 			continue;
   16711 		break;
   16712 	}
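	/*
	 * The loop above allows one retry of the ID read pair; the first
	 * MDIO access after a reset can fail or return an invalid ID.  That
	 * rationale is an assumption based on the retry pattern, not
	 * something documented here.
	 */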
   16713 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16714 		goto out;
   16715 
   16716 	/*
   16717 	 * In case the PHY needs to be in mdio slow mode,
   16718 	 * set slow mode and try to get the PHY id again.
   16719 	 */
   16720 	rv = 0;
   16721 	if (sc->sc_type < WM_T_PCH_LPT) {
   16722 		sc->phy.release(sc);
   16723 		wm_set_mdio_slow_mode_hv(sc);
   16724 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16725 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16726 		sc->phy.acquire(sc);
   16727 	}
   16728 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16729 		device_printf(sc->sc_dev, "XXX return with false\n");
   16730 		return false;
   16731 	}
   16732 out:
   16733 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16734 		/* Only unforce SMBus if ME is not active */
   16735 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16736 			uint16_t phyreg;
   16737 
   16738 			/* Unforce SMBus mode in PHY */
   16739 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16740 			    CV_SMB_CTRL, &phyreg);
   16741 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16742 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16743 			    CV_SMB_CTRL, phyreg);
   16744 
   16745 			/* Unforce SMBus mode in MAC */
   16746 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16747 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16748 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16749 		}
   16750 	}
   16751 	return true;
   16752 }
   16753 
   16754 static void
   16755 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16756 {
   16757 	uint32_t reg;
   16758 	int i;
   16759 
   16760 	/* Set PHY Config Counter to 50msec */
   16761 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16762 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16763 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16764 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16765 
   16766 	/* Toggle LANPHYPC */
   16767 	reg = CSR_READ(sc, WMREG_CTRL);
   16768 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16769 	reg &= ~CTRL_LANPHYPC_VALUE;
   16770 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16771 	CSR_WRITE_FLUSH(sc);
   16772 	delay(1000);
   16773 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16774 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16775 	CSR_WRITE_FLUSH(sc);
   16776 
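	/*
	 * The toggle above drives the LANPHYPC pin to power-cycle the PHY.
	 * On LPT and newer parts CTRL_EXT_LPCD is polled below as a
	 * "LANPHYPC cycle done" indication (an assumption consistent with
	 * the reference drivers); older parts simply wait a fixed 50ms.
	 */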
   16777 	if (sc->sc_type < WM_T_PCH_LPT)
   16778 		delay(50 * 1000);
   16779 	else {
   16780 		i = 20;
   16781 
   16782 		do {
   16783 			delay(5 * 1000);
   16784 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16785 		    && i--);
   16786 
   16787 		delay(30 * 1000);
   16788 	}
   16789 }
   16790 
   16791 static int
   16792 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16793 {
   16794 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16795 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16796 	uint32_t rxa;
   16797 	uint16_t scale = 0, lat_enc = 0;
   16798 	int32_t obff_hwm = 0;
   16799 	int64_t lat_ns, value;
   16800 
   16801 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16802 		device_xname(sc->sc_dev), __func__));
   16803 
   16804 	if (link) {
   16805 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16806 		uint32_t status;
   16807 		uint16_t speed;
   16808 		pcireg_t preg;
   16809 
   16810 		status = CSR_READ(sc, WMREG_STATUS);
   16811 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16812 		case STATUS_SPEED_10:
   16813 			speed = 10;
   16814 			break;
   16815 		case STATUS_SPEED_100:
   16816 			speed = 100;
   16817 			break;
   16818 		case STATUS_SPEED_1000:
   16819 			speed = 1000;
   16820 			break;
   16821 		default:
   16822 			device_printf(sc->sc_dev, "Unknown speed "
   16823 			    "(status = %08x)\n", status);
   16824 			return -1;
   16825 		}
   16826 
   16827 		/* Rx Packet Buffer Allocation size (KB) */
   16828 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16829 
   16830 		/*
   16831 		 * Determine the maximum latency tolerated by the device.
   16832 		 *
   16833 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16834 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16835 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16836 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16837 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16838 		 */
   16839 		lat_ns = ((int64_t)rxa * 1024 -
   16840 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16841 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16842 		if (lat_ns < 0)
   16843 			lat_ns = 0;
   16844 		else
   16845 			lat_ns /= speed;
   16846 		value = lat_ns;
   16847 
   16848 		while (value > LTRV_VALUE) {
    16849 			scale++;
   16850 			value = howmany(value, __BIT(5));
   16851 		}
   16852 		if (scale > LTRV_SCALE_MAX) {
   16853 			device_printf(sc->sc_dev,
   16854 			    "Invalid LTR latency scale %d\n", scale);
   16855 			return -1;
   16856 		}
   16857 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
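		/*
		 * Worked example (illustrative numbers, and assuming
		 * LTRV_VALUE masks the 10-bit value field and LTRV_SCALE the
		 * usual PCIe LTR scale bits 12:10): with rxa = 24KB, a
		 * 1500-byte MTU and a 1000Mbps link,
		 *   lat_ns = (24 * 1024 - 2 * (1500 + 14)) * 8 * 1000 / 1000
		 *          = 172384.
		 * 172384 > 1023, so the loop divides by 2^5 twice
		 * (172384 -> 5387 -> 169), giving scale = 2 and
		 * lat_enc = (2 << 10) | 169, which decodes to 169 * 2^10 ns,
		 * roughly 173us.
		 */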
   16858 
   16859 		/* Determine the maximum latency tolerated by the platform */
   16860 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16861 		    WM_PCI_LTR_CAP_LPT);
   16862 		max_snoop = preg & 0xffff;
   16863 		max_nosnoop = preg >> 16;
   16864 
   16865 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16866 
   16867 		if (lat_enc > max_ltr_enc) {
   16868 			lat_enc = max_ltr_enc;
   16869 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16870 			    * PCI_LTR_SCALETONS(
   16871 				    __SHIFTOUT(lat_enc,
   16872 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16873 		}
   16874 
   16875 		if (lat_ns) {
   16876 			lat_ns *= speed * 1000;
   16877 			lat_ns /= 8;
   16878 			lat_ns /= 1000000000;
   16879 			obff_hwm = (int32_t)(rxa - lat_ns);
   16880 		}
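		/*
		 * Continuing the example above (and assuming the platform
		 * limit did not clamp lat_enc): lat_ns = 172384 at 1000Mbps
		 * converts to 172384 * 1000 * 1000 / 8 / 10^9 ~= 21KB of Rx
		 * buffer consumed during that latency, so with rxa = 24KB
		 * the OBFF high water mark comes out at about 3KB.
		 */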
   16881 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    16882 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16883 			    "(rxa = %d, lat_ns = %d)\n",
   16884 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16885 			return -1;
   16886 		}
   16887 	}
    16888 	/* Snoop and No-Snoop latencies are set to the same value */
   16889 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16890 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16891 
   16892 	/* Set OBFF high water mark */
   16893 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16894 	reg |= obff_hwm;
   16895 	CSR_WRITE(sc, WMREG_SVT, reg);
   16896 
   16897 	/* Enable OBFF */
   16898 	reg = CSR_READ(sc, WMREG_SVCR);
   16899 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16900 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16901 
   16902 	return 0;
   16903 }
   16904 
   16905 /*
   16906  * I210 Errata 25 and I211 Errata 10
   16907  * Slow System Clock.
   16908  *
    16909  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
   16910  */
   16911 static int
   16912 wm_pll_workaround_i210(struct wm_softc *sc)
   16913 {
   16914 	uint32_t mdicnfg, wuc;
   16915 	uint32_t reg;
   16916 	pcireg_t pcireg;
   16917 	uint32_t pmreg;
   16918 	uint16_t nvmword, tmp_nvmword;
   16919 	uint16_t phyval;
   16920 	bool wa_done = false;
   16921 	int i, rv = 0;
   16922 
   16923 	/* Get Power Management cap offset */
   16924 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16925 	    &pmreg, NULL) == 0)
   16926 		return -1;
   16927 
   16928 	/* Save WUC and MDICNFG registers */
   16929 	wuc = CSR_READ(sc, WMREG_WUC);
   16930 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16931 
   16932 	reg = mdicnfg & ~MDICNFG_DEST;
   16933 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16934 
   16935 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   16936 		/*
   16937 		 * The default value of the Initialization Control Word 1
   16938 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   16939 		 */
   16940 		nvmword = INVM_DEFAULT_AL;
   16941 	}
   16942 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16943 
   16944 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16945 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16946 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16947 
   16948 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16949 			rv = 0;
   16950 			break; /* OK */
   16951 		} else
   16952 			rv = -1;
   16953 
   16954 		wa_done = true;
   16955 		/* Directly reset the internal PHY */
   16956 		reg = CSR_READ(sc, WMREG_CTRL);
   16957 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16958 
   16959 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16960 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16961 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16962 
   16963 		CSR_WRITE(sc, WMREG_WUC, 0);
   16964 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16965 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16966 
   16967 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16968 		    pmreg + PCI_PMCSR);
   16969 		pcireg |= PCI_PMCSR_STATE_D3;
   16970 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16971 		    pmreg + PCI_PMCSR, pcireg);
   16972 		delay(1000);
   16973 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16974 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16975 		    pmreg + PCI_PMCSR, pcireg);
   16976 
   16977 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16978 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16979 
   16980 		/* Restore WUC register */
   16981 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16982 	}
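	/*
	 * Rationale sketch for the loop above (an assumption based on the
	 * errata description, not spelled out in this file): writing the
	 * OR'd autoload word to EEARBC_I210 and bouncing the function
	 * through D3hot/D0 makes the internal PHY re-run autoload with
	 * INVM_PLL_WO_VAL set, which lets the PLL lock; the original word
	 * is written back afterwards so normal operation is unaffected.
	 */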
   16983 
   16984 	/* Restore MDICNFG setting */
   16985 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16986 	if (wa_done)
   16987 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16988 	return rv;
   16989 }
   16990 
   16991 static void
   16992 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16993 {
   16994 	uint32_t reg;
   16995 
   16996 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16997 		device_xname(sc->sc_dev), __func__));
   16998 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16999 	    || (sc->sc_type == WM_T_PCH_CNP));
   17000 
   17001 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17002 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17003 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17004 
   17005 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17006 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17007 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17008 }
   17009 
   17010 /* Sysctl function */
   17011 #ifdef WM_DEBUG
   17012 static int
   17013 wm_sysctl_debug(SYSCTLFN_ARGS)
   17014 {
   17015 	struct sysctlnode node = *rnode;
   17016 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17017 	uint32_t dflags;
   17018 	int error;
   17019 
   17020 	dflags = sc->sc_debug;
   17021 	node.sysctl_data = &dflags;
   17022 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17023 
   17024 	if (error || newp == NULL)
   17025 		return error;
   17026 
   17027 	sc->sc_debug = dflags;
   17028 
   17029 	return 0;
   17030 }
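
/*
 * Usage sketch (the node name here is hypothetical; the actual sysctl tree
 * is set up elsewhere in this file): with WM_DEBUG compiled in, the handler
 * above lets the debug mask be changed at run time, e.g.
 *	sysctl -w hw.wm0.debug=0x2
 * where the value is a bitwise OR of WM_DEBUG_* flags.
 */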
   17031 #endif
   17032