/*	$NetBSD: if_wm.c,v 1.705 2021/06/16 00:21:18 riastradh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.705 2021/06/16 00:21:18 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */

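/*
 * Illustrative sketch (not part of the driver): with WM_DEBUG enabled,
 * callers pass the printf() arguments as a parenthesized list so the
 * macro can drop them entirely in non-debug builds, e.g.:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */
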
#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it; see the sketch
 * following these macros.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

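/*
 * Illustrative sketch (not part of the driver): the load-or-defrag
 * pattern described above looks roughly like this, assuming m0 is the
 * outgoing mbuf chain and txs is the transmit job being set up:
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, txs->txs_dmamap, m0,
 *	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		// More than WM_NTXSEGS segments: compact and retry.
 *		struct mbuf *m = m_defrag(m0, M_DONTWAIT);
 *		if (m != NULL)
 *			error = bus_dmamap_load_mbuf(sc->sc_dmat,
 *			    txs->txs_dmamap, m,
 *			    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
 *	}
 */
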
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

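/*
 * Illustrative sketch (not part of the driver): for Tx queue 0,
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares txq_txdw_evcnt_name[] and
 * txq_ev_txdw, and
 *
 *	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname);
 *
 * registers the counter under the generated name "txq00txdw".
 */
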
struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

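/*
 * Illustrative sketch (not part of the driver): a caller takes the
 * core lock around softc state changes, and helpers can assert that
 * it is held, e.g.:
 *
 *	WM_CORE_LOCK(sc);
 *	// ... modify softc state; helpers may
 *	// KASSERT(WM_CORE_LOCKED(sc)); ...
 *	WM_CORE_UNLOCK(sc);
 *
 * When sc_core_lock is NULL the macros degrade to no-ops and
 * WM_CORE_LOCKED() is always true.
 */
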
#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

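/*
 * Illustrative sketch (not part of the driver): a packet spanning
 * several 2k buffers is assembled by appending each mbuf to the
 * current chain; once the packet's last descriptor is seen, the
 * completed chain is taken and a new one is started:
 *
 *	WM_RXCHAIN_LINK(rxq, m);	// append this buffer's mbuf
 *	if (lastseg) {
 *		m = rxq->rxq_head;	// completed packet
 *		WM_RXCHAIN_RESET(rxq);	// start the next chain
 *	}
 */
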
#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

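/*
 * For example (not part of the driver), in a function whose queue
 * pointer is named txq, WM_Q_EVCNT_INCR(txq, defrag) expands to
 * WM_EVCNT_INCR(&(txq)->txq_ev_defrag).  With
 * __HAVE_ATOMIC64_LOADSTORE this becomes a relaxed atomic load/store
 * pair, which keeps the 64-bit counter update tear-free without the
 * cost of a locked read-modify-write.
 */
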
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

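/*
 * Illustrative sketch (not part of the driver; register names assumed
 * from if_wmreg.h): programming the Tx descriptor ring base splits the
 * 64-bit DMA address of descriptor 0 across a high/low register pair:
 *
 *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
 *
 * On systems with a 32-bit bus_addr_t the _HI half is constant 0.
 */
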
/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHYs' own workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1340 	  "Intel i82801H LAN Controller",
   1341 	  WM_T_ICH8,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1343 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1344 	  WM_T_ICH8,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1346 	  "Intel i82801H (M) LAN Controller",
   1347 	  WM_T_ICH8,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1349 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1350 	  WM_T_ICH8,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1352 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1353 	  WM_T_ICH8,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1355 	  "82567V-3 LAN Controller",
   1356 	  WM_T_ICH8,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1358 	  "82801I (AMT) LAN Controller",
   1359 	  WM_T_ICH9,		WMP_F_COPPER },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1361 	  "82801I 10/100 LAN Controller",
   1362 	  WM_T_ICH9,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1364 	  "82801I (G) 10/100 LAN Controller",
   1365 	  WM_T_ICH9,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1367 	  "82801I (GT) 10/100 LAN Controller",
   1368 	  WM_T_ICH9,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1370 	  "82801I (C) LAN Controller",
   1371 	  WM_T_ICH9,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1373 	  "82801I mobile LAN Controller",
   1374 	  WM_T_ICH9,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1376 	  "82801I mobile (V) LAN Controller",
   1377 	  WM_T_ICH9,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1379 	  "82801I mobile (AMT) LAN Controller",
   1380 	  WM_T_ICH9,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1382 	  "82567LM-4 LAN Controller",
   1383 	  WM_T_ICH9,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1385 	  "82567LM-2 LAN Controller",
   1386 	  WM_T_ICH10,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1388 	  "82567LF-2 LAN Controller",
   1389 	  WM_T_ICH10,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1391 	  "82567LM-3 LAN Controller",
   1392 	  WM_T_ICH10,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1394 	  "82567LF-3 LAN Controller",
   1395 	  WM_T_ICH10,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1397 	  "82567V-2 LAN Controller",
   1398 	  WM_T_ICH10,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1400 	  "82567V-3? LAN Controller",
   1401 	  WM_T_ICH10,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1403 	  "HANKSVILLE LAN Controller",
   1404 	  WM_T_ICH10,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1406 	  "PCH LAN (82577LM) Controller",
   1407 	  WM_T_PCH,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1409 	  "PCH LAN (82577LC) Controller",
   1410 	  WM_T_PCH,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1412 	  "PCH LAN (82578DM) Controller",
   1413 	  WM_T_PCH,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1415 	  "PCH LAN (82578DC) Controller",
   1416 	  WM_T_PCH,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1418 	  "PCH2 LAN (82579LM) Controller",
   1419 	  WM_T_PCH2,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1421 	  "PCH2 LAN (82579V) Controller",
   1422 	  WM_T_PCH2,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1424 	  "82575EB dual-1000baseT Ethernet",
   1425 	  WM_T_82575,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1427 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1428 	  WM_T_82575,		WMP_F_SERDES },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1430 	  "82575GB quad-1000baseT Ethernet",
   1431 	  WM_T_82575,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1433 	  "82575GB quad-1000baseT Ethernet (PM)",
   1434 	  WM_T_82575,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1436 	  "82576 1000BaseT Ethernet",
   1437 	  WM_T_82576,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1439 	  "82576 1000BaseX Ethernet",
   1440 	  WM_T_82576,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1443 	  "82576 gigabit Ethernet (SERDES)",
   1444 	  WM_T_82576,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1447 	  "82576 quad-1000BaseT Ethernet",
   1448 	  WM_T_82576,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1451 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1452 	  WM_T_82576,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1455 	  "82576 gigabit Ethernet",
   1456 	  WM_T_82576,		WMP_F_COPPER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1459 	  "82576 gigabit Ethernet (SERDES)",
   1460 	  WM_T_82576,		WMP_F_SERDES },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1462 	  "82576 quad-gigabit Ethernet (SERDES)",
   1463 	  WM_T_82576,		WMP_F_SERDES },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1466 	  "82580 1000BaseT Ethernet",
   1467 	  WM_T_82580,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1469 	  "82580 1000BaseX Ethernet",
   1470 	  WM_T_82580,		WMP_F_FIBER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1473 	  "82580 1000BaseT Ethernet (SERDES)",
   1474 	  WM_T_82580,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1477 	  "82580 gigabit Ethernet (SGMII)",
   1478 	  WM_T_82580,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1480 	  "82580 dual-1000BaseT Ethernet",
   1481 	  WM_T_82580,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1484 	  "82580 quad-1000BaseX Ethernet",
   1485 	  WM_T_82580,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1488 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1489 	  WM_T_82580,		WMP_F_COPPER },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1492 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1493 	  WM_T_82580,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1496 	  "DH89XXCC 1000BASE-KX Ethernet",
   1497 	  WM_T_82580,		WMP_F_SERDES },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1500 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1501 	  WM_T_82580,		WMP_F_SERDES },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1504 	  "I350 Gigabit Network Connection",
   1505 	  WM_T_I350,		WMP_F_COPPER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1508 	  "I350 Gigabit Fiber Network Connection",
   1509 	  WM_T_I350,		WMP_F_FIBER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1512 	  "I350 Gigabit Backplane Connection",
   1513 	  WM_T_I350,		WMP_F_SERDES },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1516 	  "I350 Quad Port Gigabit Ethernet",
   1517 	  WM_T_I350,		WMP_F_SERDES },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1520 	  "I350 Gigabit Connection",
   1521 	  WM_T_I350,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1524 	  "I354 Gigabit Ethernet (KX)",
   1525 	  WM_T_I354,		WMP_F_SERDES },
   1526 
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1528 	  "I354 Gigabit Ethernet (SGMII)",
   1529 	  WM_T_I354,		WMP_F_COPPER },
   1530 
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1532 	  "I354 Gigabit Ethernet (2.5G)",
   1533 	  WM_T_I354,		WMP_F_COPPER },
   1534 
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1536 	  "I210-T1 Ethernet Server Adapter",
   1537 	  WM_T_I210,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1540 	  "I210 Ethernet (Copper OEM)",
   1541 	  WM_T_I210,		WMP_F_COPPER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1544 	  "I210 Ethernet (Copper IT)",
   1545 	  WM_T_I210,		WMP_F_COPPER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1548 	  "I210 Ethernet (Copper, FLASH less)",
   1549 	  WM_T_I210,		WMP_F_COPPER },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1552 	  "I210 Gigabit Ethernet (Fiber)",
   1553 	  WM_T_I210,		WMP_F_FIBER },
   1554 
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1556 	  "I210 Gigabit Ethernet (SERDES)",
   1557 	  WM_T_I210,		WMP_F_SERDES },
   1558 
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1560 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1561 	  WM_T_I210,		WMP_F_SERDES },
   1562 
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1564 	  "I210 Gigabit Ethernet (SGMII)",
   1565 	  WM_T_I210,		WMP_F_COPPER },
   1566 
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1568 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1569 	  WM_T_I210,		WMP_F_COPPER },
   1570 
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1572 	  "I211 Ethernet (COPPER)",
   1573 	  WM_T_I211,		WMP_F_COPPER },
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1575 	  "I217 V Ethernet Connection",
   1576 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1577 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1578 	  "I217 LM Ethernet Connection",
   1579 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1581 	  "I218 V Ethernet Connection",
   1582 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1584 	  "I218 V Ethernet Connection",
   1585 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1587 	  "I218 V Ethernet Connection",
   1588 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1589 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1590 	  "I218 LM Ethernet Connection",
   1591 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1593 	  "I218 LM Ethernet Connection",
   1594 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1596 	  "I218 LM Ethernet Connection",
   1597 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1599 	  "I219 LM Ethernet Connection",
   1600 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1602 	  "I219 LM Ethernet Connection",
   1603 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1605 	  "I219 LM Ethernet Connection",
   1606 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1608 	  "I219 LM Ethernet Connection",
   1609 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1611 	  "I219 LM Ethernet Connection",
   1612 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1614 	  "I219 LM Ethernet Connection",
   1615 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1617 	  "I219 LM Ethernet Connection",
   1618 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1620 	  "I219 LM Ethernet Connection",
   1621 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1623 	  "I219 LM Ethernet Connection",
   1624 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1626 	  "I219 LM Ethernet Connection",
   1627 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1629 	  "I219 LM Ethernet Connection",
   1630 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1632 	  "I219 LM Ethernet Connection",
   1633 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1635 	  "I219 LM Ethernet Connection",
   1636 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1638 	  "I219 LM Ethernet Connection",
   1639 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1641 	  "I219 LM Ethernet Connection",
   1642 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1644 	  "I219 V Ethernet Connection",
   1645 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1647 	  "I219 V Ethernet Connection",
   1648 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1650 	  "I219 V Ethernet Connection",
   1651 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1653 	  "I219 V Ethernet Connection",
   1654 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1656 	  "I219 V Ethernet Connection",
   1657 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1659 	  "I219 V Ethernet Connection",
   1660 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1662 	  "I219 V Ethernet Connection",
   1663 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1665 	  "I219 V Ethernet Connection",
   1666 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1668 	  "I219 V Ethernet Connection",
   1669 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1671 	  "I219 V Ethernet Connection",
   1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1674 	  "I219 V Ethernet Connection",
   1675 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1677 	  "I219 V Ethernet Connection",
   1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1680 	  "I219 V Ethernet Connection",
   1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1682 	{ 0,			0,
   1683 	  NULL,
   1684 	  0,			0 },
   1685 };
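
/*
 * Note: the all-zero entry terminates the table; wm_lookup() below
 * stops at the first entry whose wmp_name is NULL.
 */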
   1686 
   1687 /*
    1688  * Register read/write functions
    1689  * (other than CSR_{READ|WRITE}()).
   1690  */
   1691 
   1692 #if 0 /* Not currently used */
   1693 static inline uint32_t
   1694 wm_io_read(struct wm_softc *sc, int reg)
   1695 {
   1696 
   1697 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1698 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1699 }
   1700 #endif
   1701 
   1702 static inline void
   1703 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1704 {
   1705 
   1706 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1707 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1708 }
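
/*
 * Usage sketch (illustrative, hypothetical call): the I/O BAR is a
 * two-register indirection window, with the register offset latched
 * at BAR offset 0 and the data moved through BAR offset 4, e.g.
 *
 *	wm_io_write(sc, WMREG_CTRL, CTRL_RST);
 *
 * WMREG_CTRL and CTRL_RST are the register/bit names used elsewhere
 * in this driver; only some chip-version workarounds need I/O space.
 */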
   1709 
   1710 static inline void
   1711 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1712     uint32_t data)
   1713 {
   1714 	uint32_t regval;
   1715 	int i;
   1716 
   1717 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1718 
   1719 	CSR_WRITE(sc, reg, regval);
   1720 
   1721 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1722 		delay(5);
   1723 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1724 			break;
   1725 	}
   1726 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1727 		aprint_error("%s: WARNING:"
   1728 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1729 		    device_xname(sc->sc_dev), reg);
   1730 	}
   1731 }
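
/*
 * The poll above busy-waits in 5us steps for SCTL_CTL_READY, so the
 * worst case before the warning fires is roughly
 * 5 * SCTL_CTL_POLL_TIMEOUT microseconds.
 */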
   1732 
   1733 static inline void
   1734 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1735 {
   1736 	wa->wa_low = htole32(v & 0xffffffffU);
   1737 	if (sizeof(bus_addr_t) == 8)
   1738 		wa->wa_high = htole32((uint64_t) v >> 32);
   1739 	else
   1740 		wa->wa_high = 0;
   1741 }
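
/*
 * A minimal sketch (guarded out; hypothetical LP64 values) of what
 * wm_set_dma_addr() produces:
 */
#if 0
static void
wm_set_dma_addr_example(volatile wiseman_addr_t *wa)
{

	/* A hypothetical 33-bit DMA address. */
	wm_set_dma_addr(wa, (bus_addr_t)0x112345678ULL);
	/* Now wa->wa_low == htole32(0x12345678), wa->wa_high == htole32(1). */
}
#endif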
   1742 
   1743 /*
   1744  * Descriptor sync/init functions.
   1745  */
   1746 static inline void
   1747 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1748 {
   1749 	struct wm_softc *sc = txq->txq_sc;
   1750 
   1751 	/* If it will wrap around, sync to the end of the ring. */
   1752 	if ((start + num) > WM_NTXDESC(txq)) {
   1753 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1754 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1755 		    (WM_NTXDESC(txq) - start), ops);
   1756 		num -= (WM_NTXDESC(txq) - start);
   1757 		start = 0;
   1758 	}
   1759 
   1760 	/* Now sync whatever is left. */
   1761 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1762 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1763 }
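
/*
 * Worked example (hypothetical numbers): with WM_NTXDESC(txq) == 256,
 * start == 250 and num == 10, the first bus_dmamap_sync() covers
 * descriptors 250-255 and the second covers 0-3, so a range that
 * wraps the ring is synced with a single call to wm_cdtxsync().
 */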
   1764 
   1765 static inline void
   1766 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1767 {
   1768 	struct wm_softc *sc = rxq->rxq_sc;
   1769 
   1770 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1771 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1772 }
   1773 
   1774 static inline void
   1775 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1776 {
   1777 	struct wm_softc *sc = rxq->rxq_sc;
   1778 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1779 	struct mbuf *m = rxs->rxs_mbuf;
   1780 
   1781 	/*
   1782 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1783 	 * so that the payload after the Ethernet header is aligned
   1784 	 * to a 4-byte boundary.
    1785 	 *
   1786 	 * XXX BRAINDAMAGE ALERT!
   1787 	 * The stupid chip uses the same size for every buffer, which
   1788 	 * is set in the Receive Control register.  We are using the 2K
   1789 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1790 	 * reason, we can't "scoot" packets longer than the standard
   1791 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1792 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1793 	 * the upper layer copy the headers.
   1794 	 */
   1795 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1796 
   1797 	if (sc->sc_type == WM_T_82574) {
   1798 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1799 		rxd->erx_data.erxd_addr =
   1800 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1801 		rxd->erx_data.erxd_dd = 0;
   1802 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1803 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1804 
   1805 		rxd->nqrx_data.nrxd_paddr =
   1806 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1807 		/* Currently, split header is not supported. */
   1808 		rxd->nqrx_data.nrxd_haddr = 0;
   1809 	} else {
   1810 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1811 
   1812 		wm_set_dma_addr(&rxd->wrx_addr,
   1813 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1814 		rxd->wrx_len = 0;
   1815 		rxd->wrx_cksum = 0;
   1816 		rxd->wrx_status = 0;
   1817 		rxd->wrx_errors = 0;
   1818 		rxd->wrx_special = 0;
   1819 	}
   1820 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1821 
   1822 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1823 }
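
/*
 * Worked example for the align_tweak logic above: with
 * sc_align_tweak == 2, a standard 1514-byte frame is received at
 * ext_buf + 2, which leaves the payload after the Ethernet header
 * 4-byte aligned and still fits in the 2K buffer; frames that could
 * exceed (2K - 2) force align_tweak to 0 on strict-alignment
 * machines, and the upper layers copy the headers instead.
 */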
   1824 
   1825 /*
   1826  * Device driver interface functions and commonly used functions.
   1827  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1828  */
   1829 
   1830 /* Lookup supported device table */
   1831 static const struct wm_product *
   1832 wm_lookup(const struct pci_attach_args *pa)
   1833 {
   1834 	const struct wm_product *wmp;
   1835 
   1836 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1837 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1838 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1839 			return wmp;
   1840 	}
   1841 	return NULL;
   1842 }
   1843 
   1844 /* The match function (ca_match) */
   1845 static int
   1846 wm_match(device_t parent, cfdata_t cf, void *aux)
   1847 {
   1848 	struct pci_attach_args *pa = aux;
   1849 
   1850 	if (wm_lookup(pa) != NULL)
   1851 		return 1;
   1852 
   1853 	return 0;
   1854 }
   1855 
   1856 /* The attach function (ca_attach) */
   1857 static void
   1858 wm_attach(device_t parent, device_t self, void *aux)
   1859 {
   1860 	struct wm_softc *sc = device_private(self);
   1861 	struct pci_attach_args *pa = aux;
   1862 	prop_dictionary_t dict;
   1863 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1864 	pci_chipset_tag_t pc = pa->pa_pc;
   1865 	int counts[PCI_INTR_TYPE_SIZE];
   1866 	pci_intr_type_t max_type;
   1867 	const char *eetype, *xname;
   1868 	bus_space_tag_t memt;
   1869 	bus_space_handle_t memh;
   1870 	bus_size_t memsize;
   1871 	int memh_valid;
   1872 	int i, error;
   1873 	const struct wm_product *wmp;
   1874 	prop_data_t ea;
   1875 	prop_number_t pn;
   1876 	uint8_t enaddr[ETHER_ADDR_LEN];
   1877 	char buf[256];
   1878 	char wqname[MAXCOMLEN];
   1879 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1880 	pcireg_t preg, memtype;
   1881 	uint16_t eeprom_data, apme_mask;
   1882 	bool force_clear_smbi;
   1883 	uint32_t link_mode;
   1884 	uint32_t reg;
   1885 
   1886 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1887 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1888 #endif
   1889 	sc->sc_dev = self;
   1890 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1891 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1892 	sc->sc_core_stopping = false;
   1893 
   1894 	wmp = wm_lookup(pa);
   1895 #ifdef DIAGNOSTIC
   1896 	if (wmp == NULL) {
   1897 		printf("\n");
   1898 		panic("wm_attach: impossible");
   1899 	}
   1900 #endif
   1901 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1902 
   1903 	sc->sc_pc = pa->pa_pc;
   1904 	sc->sc_pcitag = pa->pa_tag;
   1905 
   1906 	if (pci_dma64_available(pa))
   1907 		sc->sc_dmat = pa->pa_dmat64;
   1908 	else
   1909 		sc->sc_dmat = pa->pa_dmat;
   1910 
   1911 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1912 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1913 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1914 
   1915 	sc->sc_type = wmp->wmp_type;
   1916 
   1917 	/* Set default function pointers */
   1918 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1919 	sc->phy.release = sc->nvm.release = wm_put_null;
   1920 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1921 
   1922 	if (sc->sc_type < WM_T_82543) {
   1923 		if (sc->sc_rev < 2) {
   1924 			aprint_error_dev(sc->sc_dev,
   1925 			    "i82542 must be at least rev. 2\n");
   1926 			return;
   1927 		}
   1928 		if (sc->sc_rev < 3)
   1929 			sc->sc_type = WM_T_82542_2_0;
   1930 	}
   1931 
   1932 	/*
   1933 	 * Disable MSI for Errata:
   1934 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1935 	 *
   1936 	 *  82544: Errata 25
   1937 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1938 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1939 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1940 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1941 	 *
   1942 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1943 	 *
   1944 	 *  82571 & 82572: Errata 63
   1945 	 */
   1946 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1947 	    || (sc->sc_type == WM_T_82572))
   1948 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1949 
   1950 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1951 	    || (sc->sc_type == WM_T_82580)
   1952 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1953 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1954 		sc->sc_flags |= WM_F_NEWQUEUE;
   1955 
   1956 	/* Set device properties (mactype) */
   1957 	dict = device_properties(sc->sc_dev);
   1958 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1959 
   1960 	/*
    1961 	 * Map the device.  All devices support memory-mapped access,
   1962 	 * and it is really required for normal operation.
   1963 	 */
   1964 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1965 	switch (memtype) {
   1966 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1967 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1968 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1969 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1970 		break;
   1971 	default:
   1972 		memh_valid = 0;
   1973 		break;
   1974 	}
   1975 
   1976 	if (memh_valid) {
   1977 		sc->sc_st = memt;
   1978 		sc->sc_sh = memh;
   1979 		sc->sc_ss = memsize;
   1980 	} else {
   1981 		aprint_error_dev(sc->sc_dev,
   1982 		    "unable to map device registers\n");
   1983 		return;
   1984 	}
   1985 
   1986 	/*
   1987 	 * In addition, i82544 and later support I/O mapped indirect
   1988 	 * register access.  It is not desirable (nor supported in
   1989 	 * this driver) to use it for normal operation, though it is
   1990 	 * required to work around bugs in some chip versions.
   1991 	 */
   1992 	if (sc->sc_type >= WM_T_82544) {
   1993 		/* First we have to find the I/O BAR. */
   1994 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1995 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1996 			if (memtype == PCI_MAPREG_TYPE_IO)
   1997 				break;
   1998 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1999 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2000 				i += 4;	/* skip high bits, too */
   2001 		}
   2002 		if (i < PCI_MAPREG_END) {
   2003 			/*
    2004 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2005 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2006 			 * That's not a problem, because newer chips don't
    2007 			 * have this bug.
    2008 			 *
    2009 			 * The i8254x apparently doesn't respond when the
    2010 			 * I/O BAR is 0, which looks as if it has not been
    2011 			 * configured.
   2012 			 */
   2013 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2014 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2015 				aprint_error_dev(sc->sc_dev,
   2016 				    "WARNING: I/O BAR at zero.\n");
   2017 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2018 					0, &sc->sc_iot, &sc->sc_ioh,
   2019 					NULL, &sc->sc_ios) == 0) {
   2020 				sc->sc_flags |= WM_F_IOH_VALID;
   2021 			} else
   2022 				aprint_error_dev(sc->sc_dev,
   2023 				    "WARNING: unable to map I/O space\n");
   2024 		}
   2025 
   2026 	}
   2027 
   2028 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2029 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2030 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2031 	if (sc->sc_type < WM_T_82542_2_1)
   2032 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2033 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2034 
   2035 	/* Power up chip */
   2036 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2037 	    && error != EOPNOTSUPP) {
   2038 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2039 		return;
   2040 	}
   2041 
   2042 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2043 	/*
    2044 	 * To save interrupt resources, don't use MSI-X if we can
    2045 	 * use only one queue.
   2046 	 */
   2047 	if (sc->sc_nqueues > 1) {
   2048 		max_type = PCI_INTR_TYPE_MSIX;
   2049 		/*
    2050 		 * The 82583 has an MSI-X capability in the PCI configuration
    2051 		 * space but doesn't actually support it; at least the
    2052 		 * documentation says nothing about MSI-X.
   2053 		 */
   2054 		counts[PCI_INTR_TYPE_MSIX]
   2055 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2056 	} else {
   2057 		max_type = PCI_INTR_TYPE_MSI;
   2058 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2059 	}
   2060 
   2061 	/* Allocation settings */
   2062 	counts[PCI_INTR_TYPE_MSI] = 1;
   2063 	counts[PCI_INTR_TYPE_INTX] = 1;
   2064 	/* overridden by disable flags */
   2065 	if (wm_disable_msi != 0) {
   2066 		counts[PCI_INTR_TYPE_MSI] = 0;
   2067 		if (wm_disable_msix != 0) {
   2068 			max_type = PCI_INTR_TYPE_INTX;
   2069 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2070 		}
   2071 	} else if (wm_disable_msix != 0) {
   2072 		max_type = PCI_INTR_TYPE_MSI;
   2073 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2074 	}
   2075 
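	/*
	 * At this point max_type and counts[] encode the preference
	 * order MSI-X > MSI > INTx, trimmed by the wm_disable_msi and
	 * wm_disable_msix knobs above; the retry logic below downgrades
	 * one interrupt type at a time when setup fails.
	 */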
   2076 alloc_retry:
   2077 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2078 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2079 		return;
   2080 	}
   2081 
   2082 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2083 		error = wm_setup_msix(sc);
   2084 		if (error) {
   2085 			pci_intr_release(pc, sc->sc_intrs,
   2086 			    counts[PCI_INTR_TYPE_MSIX]);
   2087 
   2088 			/* Setup for MSI: Disable MSI-X */
   2089 			max_type = PCI_INTR_TYPE_MSI;
   2090 			counts[PCI_INTR_TYPE_MSI] = 1;
   2091 			counts[PCI_INTR_TYPE_INTX] = 1;
   2092 			goto alloc_retry;
   2093 		}
   2094 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2095 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2096 		error = wm_setup_legacy(sc);
   2097 		if (error) {
   2098 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2099 			    counts[PCI_INTR_TYPE_MSI]);
   2100 
   2101 			/* The next try is for INTx: Disable MSI */
   2102 			max_type = PCI_INTR_TYPE_INTX;
   2103 			counts[PCI_INTR_TYPE_INTX] = 1;
   2104 			goto alloc_retry;
   2105 		}
   2106 	} else {
   2107 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2108 		error = wm_setup_legacy(sc);
   2109 		if (error) {
   2110 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2111 			    counts[PCI_INTR_TYPE_INTX]);
   2112 			return;
   2113 		}
   2114 	}
   2115 
   2116 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2117 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2118 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2119 	    WM_WORKQUEUE_FLAGS);
   2120 	if (error) {
   2121 		aprint_error_dev(sc->sc_dev,
   2122 		    "unable to create workqueue\n");
   2123 		goto out;
   2124 	}
   2125 
   2126 	/*
   2127 	 * Check the function ID (unit number of the chip).
   2128 	 */
   2129 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2130 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2131 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2132 	    || (sc->sc_type == WM_T_82580)
   2133 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2134 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2135 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2136 	else
   2137 		sc->sc_funcid = 0;
   2138 
   2139 	/*
   2140 	 * Determine a few things about the bus we're connected to.
   2141 	 */
   2142 	if (sc->sc_type < WM_T_82543) {
   2143 		/* We don't really know the bus characteristics here. */
   2144 		sc->sc_bus_speed = 33;
   2145 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2146 		/*
   2147 		 * CSA (Communication Streaming Architecture) is about as fast
    2148 		 * as a 32-bit 66MHz PCI bus.
   2149 		 */
   2150 		sc->sc_flags |= WM_F_CSA;
   2151 		sc->sc_bus_speed = 66;
   2152 		aprint_verbose_dev(sc->sc_dev,
   2153 		    "Communication Streaming Architecture\n");
   2154 		if (sc->sc_type == WM_T_82547) {
   2155 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2156 			callout_setfunc(&sc->sc_txfifo_ch,
   2157 			    wm_82547_txfifo_stall, sc);
   2158 			aprint_verbose_dev(sc->sc_dev,
   2159 			    "using 82547 Tx FIFO stall work-around\n");
   2160 		}
   2161 	} else if (sc->sc_type >= WM_T_82571) {
   2162 		sc->sc_flags |= WM_F_PCIE;
   2163 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2164 		    && (sc->sc_type != WM_T_ICH10)
   2165 		    && (sc->sc_type != WM_T_PCH)
   2166 		    && (sc->sc_type != WM_T_PCH2)
   2167 		    && (sc->sc_type != WM_T_PCH_LPT)
   2168 		    && (sc->sc_type != WM_T_PCH_SPT)
   2169 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2170 			/* ICH* and PCH* have no PCIe capability registers */
   2171 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2172 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2173 				NULL) == 0)
   2174 				aprint_error_dev(sc->sc_dev,
   2175 				    "unable to find PCIe capability\n");
   2176 		}
   2177 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2178 	} else {
   2179 		reg = CSR_READ(sc, WMREG_STATUS);
   2180 		if (reg & STATUS_BUS64)
   2181 			sc->sc_flags |= WM_F_BUS64;
   2182 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2183 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2184 
   2185 			sc->sc_flags |= WM_F_PCIX;
   2186 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2187 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2188 				aprint_error_dev(sc->sc_dev,
   2189 				    "unable to find PCIX capability\n");
   2190 			else if (sc->sc_type != WM_T_82545_3 &&
   2191 				 sc->sc_type != WM_T_82546_3) {
   2192 				/*
   2193 				 * Work around a problem caused by the BIOS
   2194 				 * setting the max memory read byte count
   2195 				 * incorrectly.
   2196 				 */
   2197 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2198 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2199 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2200 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2201 
   2202 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2203 				    PCIX_CMD_BYTECNT_SHIFT;
   2204 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2205 				    PCIX_STATUS_MAXB_SHIFT;
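				/*
				 * Both fields encode the size as
				 * 512 << n bytes (cf. the message
				 * below): e.g. bytecnt == 3 requests a
				 * 4096-byte MMRBC while maxb == 2 caps
				 * it at 2048 bytes.
				 */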
   2206 				if (bytecnt > maxb) {
   2207 					aprint_verbose_dev(sc->sc_dev,
   2208 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2209 					    512 << bytecnt, 512 << maxb);
   2210 					pcix_cmd = (pcix_cmd &
   2211 					    ~PCIX_CMD_BYTECNT_MASK) |
   2212 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2213 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2214 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2215 					    pcix_cmd);
   2216 				}
   2217 			}
   2218 		}
   2219 		/*
   2220 		 * The quad port adapter is special; it has a PCIX-PCIX
   2221 		 * bridge on the board, and can run the secondary bus at
   2222 		 * a higher speed.
   2223 		 */
   2224 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2225 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2226 								      : 66;
   2227 		} else if (sc->sc_flags & WM_F_PCIX) {
   2228 			switch (reg & STATUS_PCIXSPD_MASK) {
   2229 			case STATUS_PCIXSPD_50_66:
   2230 				sc->sc_bus_speed = 66;
   2231 				break;
   2232 			case STATUS_PCIXSPD_66_100:
   2233 				sc->sc_bus_speed = 100;
   2234 				break;
   2235 			case STATUS_PCIXSPD_100_133:
   2236 				sc->sc_bus_speed = 133;
   2237 				break;
   2238 			default:
   2239 				aprint_error_dev(sc->sc_dev,
   2240 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2241 				    reg & STATUS_PCIXSPD_MASK);
   2242 				sc->sc_bus_speed = 66;
   2243 				break;
   2244 			}
   2245 		} else
   2246 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2247 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2248 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2249 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2250 	}
   2251 
    2252 	/* Clear interesting stat counters */
   2253 	CSR_READ(sc, WMREG_COLC);
   2254 	CSR_READ(sc, WMREG_RXERRC);
   2255 
   2256 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2257 	    || (sc->sc_type >= WM_T_ICH8))
   2258 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2259 	if (sc->sc_type >= WM_T_ICH8)
   2260 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2261 
   2262 	/* Set PHY, NVM mutex related stuff */
   2263 	switch (sc->sc_type) {
   2264 	case WM_T_82542_2_0:
   2265 	case WM_T_82542_2_1:
   2266 	case WM_T_82543:
   2267 	case WM_T_82544:
   2268 		/* Microwire */
   2269 		sc->nvm.read = wm_nvm_read_uwire;
   2270 		sc->sc_nvm_wordsize = 64;
   2271 		sc->sc_nvm_addrbits = 6;
   2272 		break;
   2273 	case WM_T_82540:
   2274 	case WM_T_82545:
   2275 	case WM_T_82545_3:
   2276 	case WM_T_82546:
   2277 	case WM_T_82546_3:
   2278 		/* Microwire */
   2279 		sc->nvm.read = wm_nvm_read_uwire;
   2280 		reg = CSR_READ(sc, WMREG_EECD);
   2281 		if (reg & EECD_EE_SIZE) {
   2282 			sc->sc_nvm_wordsize = 256;
   2283 			sc->sc_nvm_addrbits = 8;
   2284 		} else {
   2285 			sc->sc_nvm_wordsize = 64;
   2286 			sc->sc_nvm_addrbits = 6;
   2287 		}
   2288 		sc->sc_flags |= WM_F_LOCK_EECD;
   2289 		sc->nvm.acquire = wm_get_eecd;
   2290 		sc->nvm.release = wm_put_eecd;
   2291 		break;
   2292 	case WM_T_82541:
   2293 	case WM_T_82541_2:
   2294 	case WM_T_82547:
   2295 	case WM_T_82547_2:
   2296 		reg = CSR_READ(sc, WMREG_EECD);
   2297 		/*
    2298 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2299 		 * 8254[17], so set the flags and functions before calling it.
   2300 		 */
   2301 		sc->sc_flags |= WM_F_LOCK_EECD;
   2302 		sc->nvm.acquire = wm_get_eecd;
   2303 		sc->nvm.release = wm_put_eecd;
   2304 		if (reg & EECD_EE_TYPE) {
   2305 			/* SPI */
   2306 			sc->nvm.read = wm_nvm_read_spi;
   2307 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2308 			wm_nvm_set_addrbits_size_eecd(sc);
   2309 		} else {
   2310 			/* Microwire */
   2311 			sc->nvm.read = wm_nvm_read_uwire;
   2312 			if ((reg & EECD_EE_ABITS) != 0) {
   2313 				sc->sc_nvm_wordsize = 256;
   2314 				sc->sc_nvm_addrbits = 8;
   2315 			} else {
   2316 				sc->sc_nvm_wordsize = 64;
   2317 				sc->sc_nvm_addrbits = 6;
   2318 			}
   2319 		}
   2320 		break;
   2321 	case WM_T_82571:
   2322 	case WM_T_82572:
   2323 		/* SPI */
   2324 		sc->nvm.read = wm_nvm_read_eerd;
    2325 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2326 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2327 		wm_nvm_set_addrbits_size_eecd(sc);
   2328 		sc->phy.acquire = wm_get_swsm_semaphore;
   2329 		sc->phy.release = wm_put_swsm_semaphore;
   2330 		sc->nvm.acquire = wm_get_nvm_82571;
   2331 		sc->nvm.release = wm_put_nvm_82571;
   2332 		break;
   2333 	case WM_T_82573:
   2334 	case WM_T_82574:
   2335 	case WM_T_82583:
   2336 		sc->nvm.read = wm_nvm_read_eerd;
    2337 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2338 		if (sc->sc_type == WM_T_82573) {
   2339 			sc->phy.acquire = wm_get_swsm_semaphore;
   2340 			sc->phy.release = wm_put_swsm_semaphore;
   2341 			sc->nvm.acquire = wm_get_nvm_82571;
   2342 			sc->nvm.release = wm_put_nvm_82571;
   2343 		} else {
   2344 			/* Both PHY and NVM use the same semaphore. */
   2345 			sc->phy.acquire = sc->nvm.acquire
   2346 			    = wm_get_swfwhw_semaphore;
   2347 			sc->phy.release = sc->nvm.release
   2348 			    = wm_put_swfwhw_semaphore;
   2349 		}
   2350 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2351 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2352 			sc->sc_nvm_wordsize = 2048;
   2353 		} else {
   2354 			/* SPI */
   2355 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2356 			wm_nvm_set_addrbits_size_eecd(sc);
   2357 		}
   2358 		break;
   2359 	case WM_T_82575:
   2360 	case WM_T_82576:
   2361 	case WM_T_82580:
   2362 	case WM_T_I350:
   2363 	case WM_T_I354:
   2364 	case WM_T_80003:
   2365 		/* SPI */
   2366 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2367 		wm_nvm_set_addrbits_size_eecd(sc);
   2368 		if ((sc->sc_type == WM_T_80003)
   2369 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2370 			sc->nvm.read = wm_nvm_read_eerd;
   2371 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2372 		} else {
   2373 			sc->nvm.read = wm_nvm_read_spi;
   2374 			sc->sc_flags |= WM_F_LOCK_EECD;
   2375 		}
   2376 		sc->phy.acquire = wm_get_phy_82575;
   2377 		sc->phy.release = wm_put_phy_82575;
   2378 		sc->nvm.acquire = wm_get_nvm_80003;
   2379 		sc->nvm.release = wm_put_nvm_80003;
   2380 		break;
   2381 	case WM_T_ICH8:
   2382 	case WM_T_ICH9:
   2383 	case WM_T_ICH10:
   2384 	case WM_T_PCH:
   2385 	case WM_T_PCH2:
   2386 	case WM_T_PCH_LPT:
   2387 		sc->nvm.read = wm_nvm_read_ich8;
   2388 		/* FLASH */
   2389 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2390 		sc->sc_nvm_wordsize = 2048;
   2391 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2392 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2393 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2394 			aprint_error_dev(sc->sc_dev,
   2395 			    "can't map FLASH registers\n");
   2396 			goto out;
   2397 		}
   2398 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2399 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2400 		    ICH_FLASH_SECTOR_SIZE;
   2401 		sc->sc_ich8_flash_bank_size =
   2402 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2403 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2404 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2405 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
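		/*
		 * Worked example (hypothetical GFPREG value 0x001f0001):
		 * the base field selects sector 1 and the limit field
		 * sector 0x1f, so the region spans 0x1f sectors of
		 * ICH_FLASH_SECTOR_SIZE bytes each; the arithmetic above
		 * then converts that into a per-bank size in 16-bit words
		 * (two banks, two bytes per word).
		 */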
   2406 		sc->sc_flashreg_offset = 0;
   2407 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2408 		sc->phy.release = wm_put_swflag_ich8lan;
   2409 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2410 		sc->nvm.release = wm_put_nvm_ich8lan;
   2411 		break;
   2412 	case WM_T_PCH_SPT:
   2413 	case WM_T_PCH_CNP:
   2414 		sc->nvm.read = wm_nvm_read_spt;
   2415 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2416 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2417 		sc->sc_flasht = sc->sc_st;
   2418 		sc->sc_flashh = sc->sc_sh;
   2419 		sc->sc_ich8_flash_base = 0;
   2420 		sc->sc_nvm_wordsize =
   2421 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2422 		    * NVM_SIZE_MULTIPLIER;
    2423 		/* It is the size in bytes; we want words */
   2424 		sc->sc_nvm_wordsize /= 2;
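		/*
		 * E.g. (hypothetical strap): a value of 1 in STRAP bits
		 * 5:1 gives (1 + 1) * NVM_SIZE_MULTIPLIER bytes, i.e.
		 * NVM_SIZE_MULTIPLIER 16-bit words.
		 */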
   2425 		/* Assume 2 banks */
   2426 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2427 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2428 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2429 		sc->phy.release = wm_put_swflag_ich8lan;
   2430 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2431 		sc->nvm.release = wm_put_nvm_ich8lan;
   2432 		break;
   2433 	case WM_T_I210:
   2434 	case WM_T_I211:
    2435 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2436 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2437 		if (wm_nvm_flash_presence_i210(sc)) {
   2438 			sc->nvm.read = wm_nvm_read_eerd;
   2439 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2440 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2441 			wm_nvm_set_addrbits_size_eecd(sc);
   2442 		} else {
   2443 			sc->nvm.read = wm_nvm_read_invm;
   2444 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2445 			sc->sc_nvm_wordsize = INVM_SIZE;
   2446 		}
   2447 		sc->phy.acquire = wm_get_phy_82575;
   2448 		sc->phy.release = wm_put_phy_82575;
   2449 		sc->nvm.acquire = wm_get_nvm_80003;
   2450 		sc->nvm.release = wm_put_nvm_80003;
   2451 		break;
   2452 	default:
   2453 		break;
   2454 	}
   2455 
   2456 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2457 	switch (sc->sc_type) {
   2458 	case WM_T_82571:
   2459 	case WM_T_82572:
   2460 		reg = CSR_READ(sc, WMREG_SWSM2);
   2461 		if ((reg & SWSM2_LOCK) == 0) {
   2462 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2463 			force_clear_smbi = true;
   2464 		} else
   2465 			force_clear_smbi = false;
   2466 		break;
   2467 	case WM_T_82573:
   2468 	case WM_T_82574:
   2469 	case WM_T_82583:
   2470 		force_clear_smbi = true;
   2471 		break;
   2472 	default:
   2473 		force_clear_smbi = false;
   2474 		break;
   2475 	}
   2476 	if (force_clear_smbi) {
   2477 		reg = CSR_READ(sc, WMREG_SWSM);
   2478 		if ((reg & SWSM_SMBI) != 0)
   2479 			aprint_error_dev(sc->sc_dev,
   2480 			    "Please update the Bootagent\n");
   2481 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2482 	}
   2483 
   2484 	/*
    2485 	 * Defer printing the EEPROM type until after verifying the checksum.
   2486 	 * This allows the EEPROM type to be printed correctly in the case
   2487 	 * that no EEPROM is attached.
   2488 	 */
   2489 	/*
   2490 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2491 	 * this for later, so we can fail future reads from the EEPROM.
   2492 	 */
   2493 	if (wm_nvm_validate_checksum(sc)) {
   2494 		/*
    2495 		 * Retry the read, because some PCI-e parts fail the
    2496 		 * first check due to the link being in a sleep state.
   2497 		 */
   2498 		if (wm_nvm_validate_checksum(sc))
   2499 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2500 	}
   2501 
   2502 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2503 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2504 	else {
   2505 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2506 		    sc->sc_nvm_wordsize);
   2507 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2508 			aprint_verbose("iNVM");
   2509 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2510 			aprint_verbose("FLASH(HW)");
   2511 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2512 			aprint_verbose("FLASH");
   2513 		else {
   2514 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2515 				eetype = "SPI";
   2516 			else
   2517 				eetype = "MicroWire";
   2518 			aprint_verbose("(%d address bits) %s EEPROM",
   2519 			    sc->sc_nvm_addrbits, eetype);
   2520 		}
   2521 	}
   2522 	wm_nvm_version(sc);
   2523 	aprint_verbose("\n");
   2524 
   2525 	/*
   2526 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2527 	 * incorrect.
   2528 	 */
   2529 	wm_gmii_setup_phytype(sc, 0, 0);
   2530 
   2531 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2532 	switch (sc->sc_type) {
   2533 	case WM_T_ICH8:
   2534 	case WM_T_ICH9:
   2535 	case WM_T_ICH10:
   2536 	case WM_T_PCH:
   2537 	case WM_T_PCH2:
   2538 	case WM_T_PCH_LPT:
   2539 	case WM_T_PCH_SPT:
   2540 	case WM_T_PCH_CNP:
   2541 		apme_mask = WUC_APME;
   2542 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2543 		if ((eeprom_data & apme_mask) != 0)
   2544 			sc->sc_flags |= WM_F_WOL;
   2545 		break;
   2546 	default:
   2547 		break;
   2548 	}
   2549 
   2550 	/* Reset the chip to a known state. */
   2551 	wm_reset(sc);
   2552 
   2553 	/*
   2554 	 * Check for I21[01] PLL workaround.
   2555 	 *
   2556 	 * Three cases:
   2557 	 * a) Chip is I211.
   2558 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2559 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2560 	 */
   2561 	if (sc->sc_type == WM_T_I211)
   2562 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2563 	if (sc->sc_type == WM_T_I210) {
   2564 		if (!wm_nvm_flash_presence_i210(sc))
   2565 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2566 		else if ((sc->sc_nvm_ver_major < 3)
   2567 		    || ((sc->sc_nvm_ver_major == 3)
   2568 			&& (sc->sc_nvm_ver_minor < 25))) {
   2569 			aprint_verbose_dev(sc->sc_dev,
   2570 			    "ROM image version %d.%d is older than 3.25\n",
   2571 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2572 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2573 		}
   2574 	}
   2575 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2576 		wm_pll_workaround_i210(sc);
   2577 
   2578 	wm_get_wakeup(sc);
   2579 
   2580 	/* Non-AMT based hardware can now take control from firmware */
   2581 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2582 		wm_get_hw_control(sc);
   2583 
   2584 	/*
    2585 	 * Read the Ethernet address from the EEPROM, unless it was
    2586 	 * already found in the device properties.
   2587 	 */
   2588 	ea = prop_dictionary_get(dict, "mac-address");
   2589 	if (ea != NULL) {
   2590 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2591 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2592 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2593 	} else {
   2594 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2595 			aprint_error_dev(sc->sc_dev,
   2596 			    "unable to read Ethernet address\n");
   2597 			goto out;
   2598 		}
   2599 	}
   2600 
   2601 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2602 	    ether_sprintf(enaddr));
   2603 
   2604 	/*
   2605 	 * Read the config info from the EEPROM, and set up various
   2606 	 * bits in the control registers based on their contents.
   2607 	 */
   2608 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2609 	if (pn != NULL) {
   2610 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2611 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2612 	} else {
   2613 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2614 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2615 			goto out;
   2616 		}
   2617 	}
   2618 
   2619 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2620 	if (pn != NULL) {
   2621 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2622 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2623 	} else {
   2624 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2625 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2626 			goto out;
   2627 		}
   2628 	}
   2629 
   2630 	/* check for WM_F_WOL */
   2631 	switch (sc->sc_type) {
   2632 	case WM_T_82542_2_0:
   2633 	case WM_T_82542_2_1:
   2634 	case WM_T_82543:
   2635 		/* dummy? */
   2636 		eeprom_data = 0;
   2637 		apme_mask = NVM_CFG3_APME;
   2638 		break;
   2639 	case WM_T_82544:
   2640 		apme_mask = NVM_CFG2_82544_APM_EN;
   2641 		eeprom_data = cfg2;
   2642 		break;
   2643 	case WM_T_82546:
   2644 	case WM_T_82546_3:
   2645 	case WM_T_82571:
   2646 	case WM_T_82572:
   2647 	case WM_T_82573:
   2648 	case WM_T_82574:
   2649 	case WM_T_82583:
   2650 	case WM_T_80003:
   2651 	case WM_T_82575:
   2652 	case WM_T_82576:
   2653 		apme_mask = NVM_CFG3_APME;
   2654 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2655 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2656 		break;
   2657 	case WM_T_82580:
   2658 	case WM_T_I350:
   2659 	case WM_T_I354:
   2660 	case WM_T_I210:
   2661 	case WM_T_I211:
   2662 		apme_mask = NVM_CFG3_APME;
   2663 		wm_nvm_read(sc,
   2664 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2665 		    1, &eeprom_data);
   2666 		break;
   2667 	case WM_T_ICH8:
   2668 	case WM_T_ICH9:
   2669 	case WM_T_ICH10:
   2670 	case WM_T_PCH:
   2671 	case WM_T_PCH2:
   2672 	case WM_T_PCH_LPT:
   2673 	case WM_T_PCH_SPT:
   2674 	case WM_T_PCH_CNP:
    2675 		/* Already checked before wm_reset() */
   2676 		apme_mask = eeprom_data = 0;
   2677 		break;
   2678 	default: /* XXX 82540 */
   2679 		apme_mask = NVM_CFG3_APME;
   2680 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2681 		break;
   2682 	}
   2683 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2684 	if ((eeprom_data & apme_mask) != 0)
   2685 		sc->sc_flags |= WM_F_WOL;
   2686 
   2687 	/*
    2688 	 * We have the EEPROM settings; now apply the special cases
    2689 	 * where the EEPROM may be wrong or the board won't support
    2690 	 * wake-on-LAN on a particular port.
   2691 	 */
   2692 	switch (sc->sc_pcidevid) {
   2693 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2694 		sc->sc_flags &= ~WM_F_WOL;
   2695 		break;
   2696 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2697 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2698 		/* Wake events only supported on port A for dual fiber
   2699 		 * regardless of eeprom setting */
   2700 		if (sc->sc_funcid == 1)
   2701 			sc->sc_flags &= ~WM_F_WOL;
   2702 		break;
   2703 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2704 		/* If quad port adapter, disable WoL on all but port A */
   2705 		if (sc->sc_funcid != 0)
   2706 			sc->sc_flags &= ~WM_F_WOL;
   2707 		break;
   2708 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2709 		/* Wake events only supported on port A for dual fiber
   2710 		 * regardless of eeprom setting */
   2711 		if (sc->sc_funcid == 1)
   2712 			sc->sc_flags &= ~WM_F_WOL;
   2713 		break;
   2714 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2715 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2716 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2717 		/* If quad port adapter, disable WoL on all but port A */
   2718 		if (sc->sc_funcid != 0)
   2719 			sc->sc_flags &= ~WM_F_WOL;
   2720 		break;
   2721 	}
   2722 
   2723 	if (sc->sc_type >= WM_T_82575) {
   2724 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2725 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2726 			    nvmword);
   2727 			if ((sc->sc_type == WM_T_82575) ||
   2728 			    (sc->sc_type == WM_T_82576)) {
   2729 				/* Check NVM for autonegotiation */
   2730 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2731 				    != 0)
   2732 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2733 			}
   2734 			if ((sc->sc_type == WM_T_82575) ||
   2735 			    (sc->sc_type == WM_T_I350)) {
   2736 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2737 					sc->sc_flags |= WM_F_MAS;
   2738 			}
   2739 		}
   2740 	}
   2741 
   2742 	/*
    2743 	 * XXX Some multiport cards need special handling to disable
    2744 	 * a particular port.
   2745 	 */
   2746 
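         	/*
         	 * On 82544 and newer, the SWDPIN NVM word (or the
         	 * "i82543-swdpin" property) supplies the initial
         	 * software-definable pin values merged into CTRL below.
         	 */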
   2747 	if (sc->sc_type >= WM_T_82544) {
   2748 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2749 		if (pn != NULL) {
   2750 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2751 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2752 		} else {
   2753 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2754 				aprint_error_dev(sc->sc_dev,
   2755 				    "unable to read SWDPIN\n");
   2756 				goto out;
   2757 			}
   2758 		}
   2759 	}
   2760 
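         	/* ILOS (invert loss-of-signal) comes from NVM CFG1. */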
   2761 	if (cfg1 & NVM_CFG1_ILOS)
   2762 		sc->sc_ctrl |= CTRL_ILOS;
   2763 
   2764 	/*
   2765 	 * XXX
    2766 	 * This code isn't correct because pins 2 and 3 are located
    2767 	 * at different positions on newer chips. Check all datasheets.
    2768 	 *
    2769 	 * Until this is resolved, apply it only to chips <= 82580.
   2770 	 */
   2771 	if (sc->sc_type <= WM_T_82580) {
   2772 		if (sc->sc_type >= WM_T_82544) {
   2773 			sc->sc_ctrl |=
   2774 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2775 			    CTRL_SWDPIO_SHIFT;
   2776 			sc->sc_ctrl |=
   2777 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2778 			    CTRL_SWDPINS_SHIFT;
   2779 		} else {
   2780 			sc->sc_ctrl |=
   2781 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2782 			    CTRL_SWDPIO_SHIFT;
   2783 		}
   2784 	}
   2785 
   2786 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2787 		wm_nvm_read(sc,
   2788 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2789 		    1, &nvmword);
   2790 		if (nvmword & NVM_CFG3_ILOS)
   2791 			sc->sc_ctrl |= CTRL_ILOS;
   2792 	}
   2793 
   2794 #if 0
   2795 	if (sc->sc_type >= WM_T_82544) {
   2796 		if (cfg1 & NVM_CFG1_IPS0)
   2797 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2798 		if (cfg1 & NVM_CFG1_IPS1)
   2799 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2800 		sc->sc_ctrl_ext |=
   2801 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2802 		    CTRL_EXT_SWDPIO_SHIFT;
   2803 		sc->sc_ctrl_ext |=
   2804 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2805 		    CTRL_EXT_SWDPINS_SHIFT;
   2806 	} else {
   2807 		sc->sc_ctrl_ext |=
   2808 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2809 		    CTRL_EXT_SWDPIO_SHIFT;
   2810 	}
   2811 #endif
   2812 
   2813 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2814 #if 0
   2815 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2816 #endif
   2817 
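         	/*
         	 * On PCH, record whether the NVM enables K1 (a power-saving
         	 * link state); the PHY workaround code consults this later.
         	 */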
   2818 	if (sc->sc_type == WM_T_PCH) {
   2819 		uint16_t val;
   2820 
   2821 		/* Save the NVM K1 bit setting */
   2822 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2823 
   2824 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2825 			sc->sc_nvm_k1_enabled = 1;
   2826 		else
   2827 			sc->sc_nvm_k1_enabled = 0;
   2828 	}
   2829 
    2830 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2831 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2832 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2833 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2834 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2835 	    || sc->sc_type == WM_T_82573
   2836 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2837 		/* Copper only */
   2838 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2839 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2840 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2841 	    || (sc->sc_type == WM_T_I211)) {
   2842 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2843 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2844 		switch (link_mode) {
   2845 		case CTRL_EXT_LINK_MODE_1000KX:
   2846 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2847 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2848 			break;
   2849 		case CTRL_EXT_LINK_MODE_SGMII:
   2850 			if (wm_sgmii_uses_mdio(sc)) {
   2851 				aprint_normal_dev(sc->sc_dev,
   2852 				    "SGMII(MDIO)\n");
   2853 				sc->sc_flags |= WM_F_SGMII;
   2854 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2855 				break;
   2856 			}
   2857 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2858 			/*FALLTHROUGH*/
   2859 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2860 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2861 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2862 				if (link_mode
   2863 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2864 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2865 					sc->sc_flags |= WM_F_SGMII;
   2866 					aprint_verbose_dev(sc->sc_dev,
   2867 					    "SGMII\n");
   2868 				} else {
   2869 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2870 					aprint_verbose_dev(sc->sc_dev,
   2871 					    "SERDES\n");
   2872 				}
   2873 				break;
   2874 			}
   2875 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2876 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2877 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2878 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2879 				sc->sc_flags |= WM_F_SGMII;
   2880 			}
   2881 			/* Do not change link mode for 100BaseFX */
   2882 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2883 				break;
   2884 
   2885 			/* Change current link mode setting */
   2886 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2887 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2888 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2889 			else
   2890 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2891 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2892 			break;
   2893 		case CTRL_EXT_LINK_MODE_GMII:
   2894 		default:
   2895 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2896 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2897 			break;
   2898 		}
   2899 
    2900 		/* Enable the I2C interface only when in SGMII mode. */
    2901 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2902 			reg |= CTRL_EXT_I2C_ENA;
    2903 		else
    2904 			reg &= ~CTRL_EXT_I2C_ENA;
   2905 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2906 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2907 			if (!wm_sgmii_uses_mdio(sc))
   2908 				wm_gmii_setup_phytype(sc, 0, 0);
   2909 			wm_reset_mdicnfg_82580(sc);
   2910 		}
   2911 	} else if (sc->sc_type < WM_T_82543 ||
   2912 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2913 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2914 			aprint_error_dev(sc->sc_dev,
   2915 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2916 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2917 		}
   2918 	} else {
   2919 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2920 			aprint_error_dev(sc->sc_dev,
   2921 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2922 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2923 		}
   2924 	}
   2925 
   2926 	if (sc->sc_type >= WM_T_PCH2)
   2927 		sc->sc_flags |= WM_F_EEE;
   2928 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2929 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
    2930 		/* XXX I354 needs special handling (not yet implemented). */
   2931 		if (sc->sc_type != WM_T_I354)
   2932 			sc->sc_flags |= WM_F_EEE;
   2933 	}
   2934 
   2935 	/*
    2936 	 * The I350 has a bug where it always strips the CRC whether asked
    2937 	 * to or not, so ask for stripped CRC here and cope with it in rxeof.
   2938 	 */
   2939 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2940 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2941 		sc->sc_flags |= WM_F_CRC_STRIP;
   2942 
   2943 	/* Set device properties (macflags) */
   2944 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2945 
   2946 	if (sc->sc_flags != 0) {
   2947 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2948 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2949 	}
   2950 
   2951 #ifdef WM_MPSAFE
   2952 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2953 #else
   2954 	sc->sc_core_lock = NULL;
   2955 #endif
   2956 
   2957 	/* Initialize the media structures accordingly. */
   2958 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2959 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2960 	else
   2961 		wm_tbi_mediainit(sc); /* All others */
   2962 
   2963 	ifp = &sc->sc_ethercom.ec_if;
   2964 	xname = device_xname(sc->sc_dev);
   2965 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2966 	ifp->if_softc = sc;
   2967 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2968 #ifdef WM_MPSAFE
   2969 	ifp->if_extflags = IFEF_MPSAFE;
   2970 #endif
   2971 	ifp->if_ioctl = wm_ioctl;
   2972 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2973 		ifp->if_start = wm_nq_start;
   2974 		/*
   2975 		 * When the number of CPUs is one and the controller can use
    2976 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2977 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2978 		 * the other for link status changes.
   2979 		 * In this situation, wm_nq_transmit() is disadvantageous
   2980 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2981 		 */
   2982 		if (wm_is_using_multiqueue(sc))
   2983 			ifp->if_transmit = wm_nq_transmit;
   2984 	} else {
   2985 		ifp->if_start = wm_start;
   2986 		/*
    2987 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2988 		 */
   2989 		if (wm_is_using_multiqueue(sc))
   2990 			ifp->if_transmit = wm_transmit;
   2991 	}
    2992 	/* wm(4) does not use ifp->if_watchdog; wm_tick() serves as the watchdog. */
   2993 	ifp->if_init = wm_init;
   2994 	ifp->if_stop = wm_stop;
   2995 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2996 	IFQ_SET_READY(&ifp->if_snd);
   2997 
    2998 	/* Check for jumbo frame support */
   2999 	switch (sc->sc_type) {
   3000 	case WM_T_82573:
   3001 		/* XXX limited to 9234 if ASPM is disabled */
   3002 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3003 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3004 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3005 		break;
   3006 	case WM_T_82571:
   3007 	case WM_T_82572:
   3008 	case WM_T_82574:
   3009 	case WM_T_82583:
   3010 	case WM_T_82575:
   3011 	case WM_T_82576:
   3012 	case WM_T_82580:
   3013 	case WM_T_I350:
   3014 	case WM_T_I354:
   3015 	case WM_T_I210:
   3016 	case WM_T_I211:
   3017 	case WM_T_80003:
   3018 	case WM_T_ICH9:
   3019 	case WM_T_ICH10:
   3020 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3021 	case WM_T_PCH_LPT:
   3022 	case WM_T_PCH_SPT:
   3023 	case WM_T_PCH_CNP:
   3024 		/* XXX limited to 9234 */
   3025 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3026 		break;
   3027 	case WM_T_PCH:
   3028 		/* XXX limited to 4096 */
   3029 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3030 		break;
   3031 	case WM_T_82542_2_0:
   3032 	case WM_T_82542_2_1:
   3033 	case WM_T_ICH8:
   3034 		/* No support for jumbo frame */
   3035 		break;
   3036 	default:
   3037 		/* ETHER_MAX_LEN_JUMBO */
   3038 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3039 		break;
   3040 	}
   3041 
    3042 	/* If we're an i82543 or greater, we can support VLANs. */
   3043 	if (sc->sc_type >= WM_T_82543) {
   3044 		sc->sc_ethercom.ec_capabilities |=
   3045 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3046 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3047 	}
   3048 
   3049 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3050 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3051 
   3052 	/*
    3053 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   3054 	 * on i82543 and later.
   3055 	 */
   3056 	if (sc->sc_type >= WM_T_82543) {
   3057 		ifp->if_capabilities |=
   3058 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3059 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3060 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3061 		    IFCAP_CSUM_TCPv6_Tx |
   3062 		    IFCAP_CSUM_UDPv6_Tx;
   3063 	}
   3064 
   3065 	/*
   3066 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3067 	 *
   3068 	 *	82541GI (8086:1076) ... no
   3069 	 *	82572EI (8086:10b9) ... yes
   3070 	 */
   3071 	if (sc->sc_type >= WM_T_82571) {
   3072 		ifp->if_capabilities |=
   3073 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3074 	}
   3075 
   3076 	/*
    3077 	 * If we're an i82544 or greater (except i82547), we can do
   3078 	 * TCP segmentation offload.
   3079 	 */
   3080 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3081 		ifp->if_capabilities |= IFCAP_TSOv4;
   3082 	}
   3083 
   3084 	if (sc->sc_type >= WM_T_82571) {
   3085 		ifp->if_capabilities |= IFCAP_TSOv6;
   3086 	}
   3087 
   3088 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3089 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3090 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3091 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3092 
   3093 	/* Attach the interface. */
   3094 	if_initialize(ifp);
   3095 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3096 	ether_ifattach(ifp, enaddr);
   3097 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3098 	if_register(ifp);
   3099 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3100 	    RND_FLAG_DEFAULT);
   3101 
   3102 #ifdef WM_EVENT_COUNTERS
   3103 	/* Attach event counters. */
   3104 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3105 	    NULL, xname, "linkintr");
   3106 
   3107 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3108 	    NULL, xname, "tx_xoff");
   3109 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3110 	    NULL, xname, "tx_xon");
   3111 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3112 	    NULL, xname, "rx_xoff");
   3113 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3114 	    NULL, xname, "rx_xon");
   3115 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3116 	    NULL, xname, "rx_macctl");
   3117 #endif /* WM_EVENT_COUNTERS */
   3118 
   3119 	sc->sc_txrx_use_workqueue = false;
   3120 
   3121 	if (wm_phy_need_linkdown_discard(sc))
   3122 		wm_set_linkdown_discard(sc);
   3123 
   3124 	wm_init_sysctls(sc);
   3125 
   3126 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3127 		pmf_class_network_register(self, ifp);
   3128 	else
   3129 		aprint_error_dev(self, "couldn't establish power handler\n");
   3130 
   3131 	sc->sc_flags |= WM_F_ATTACHED;
   3132 out:
   3133 	return;
   3134 }
   3135 
   3136 /* The detach function (ca_detach) */
   3137 static int
   3138 wm_detach(device_t self, int flags __unused)
   3139 {
   3140 	struct wm_softc *sc = device_private(self);
   3141 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3142 	int i;
   3143 
   3144 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3145 		return 0;
   3146 
   3147 	/* Stop the interface. Callouts are stopped in it. */
   3148 	wm_stop(ifp, 1);
   3149 
   3150 	pmf_device_deregister(self);
   3151 
   3152 	sysctl_teardown(&sc->sc_sysctllog);
   3153 
   3154 #ifdef WM_EVENT_COUNTERS
   3155 	evcnt_detach(&sc->sc_ev_linkintr);
   3156 
   3157 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3158 	evcnt_detach(&sc->sc_ev_tx_xon);
   3159 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3160 	evcnt_detach(&sc->sc_ev_rx_xon);
   3161 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3162 #endif /* WM_EVENT_COUNTERS */
   3163 
   3164 	rnd_detach_source(&sc->rnd_source);
   3165 
   3166 	/* Tell the firmware about the release */
   3167 	WM_CORE_LOCK(sc);
   3168 	wm_release_manageability(sc);
   3169 	wm_release_hw_control(sc);
   3170 	wm_enable_wakeup(sc);
   3171 	WM_CORE_UNLOCK(sc);
   3172 
   3173 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3174 
   3175 	ether_ifdetach(ifp);
   3176 	if_detach(ifp);
   3177 	if_percpuq_destroy(sc->sc_ipq);
   3178 
   3179 	/* Delete all remaining media. */
   3180 	ifmedia_fini(&sc->sc_mii.mii_media);
   3181 
   3182 	/* Unload RX dmamaps and free mbufs */
   3183 	for (i = 0; i < sc->sc_nqueues; i++) {
   3184 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3185 		mutex_enter(rxq->rxq_lock);
   3186 		wm_rxdrain(rxq);
   3187 		mutex_exit(rxq->rxq_lock);
   3188 	}
   3189 	/* Must unlock here */
   3190 
   3191 	/* Disestablish the interrupt handler */
   3192 	for (i = 0; i < sc->sc_nintrs; i++) {
   3193 		if (sc->sc_ihs[i] != NULL) {
   3194 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3195 			sc->sc_ihs[i] = NULL;
   3196 		}
   3197 	}
   3198 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3199 
    3200 	/* wm_stop() ensures the workqueue is stopped. */
   3201 	workqueue_destroy(sc->sc_queue_wq);
   3202 
   3203 	for (i = 0; i < sc->sc_nqueues; i++)
   3204 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3205 
   3206 	wm_free_txrx_queues(sc);
   3207 
   3208 	/* Unmap the registers */
   3209 	if (sc->sc_ss) {
   3210 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3211 		sc->sc_ss = 0;
   3212 	}
   3213 	if (sc->sc_ios) {
   3214 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3215 		sc->sc_ios = 0;
   3216 	}
   3217 	if (sc->sc_flashs) {
   3218 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3219 		sc->sc_flashs = 0;
   3220 	}
   3221 
   3222 	if (sc->sc_core_lock)
   3223 		mutex_obj_free(sc->sc_core_lock);
   3224 	if (sc->sc_ich_phymtx)
   3225 		mutex_obj_free(sc->sc_ich_phymtx);
   3226 	if (sc->sc_ich_nvmmtx)
   3227 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3228 
   3229 	return 0;
   3230 }
   3231 
   3232 static bool
   3233 wm_suspend(device_t self, const pmf_qual_t *qual)
   3234 {
   3235 	struct wm_softc *sc = device_private(self);
   3236 
   3237 	wm_release_manageability(sc);
   3238 	wm_release_hw_control(sc);
   3239 	wm_enable_wakeup(sc);
   3240 
   3241 	return true;
   3242 }
   3243 
   3244 static bool
   3245 wm_resume(device_t self, const pmf_qual_t *qual)
   3246 {
   3247 	struct wm_softc *sc = device_private(self);
   3248 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3249 	pcireg_t reg;
   3250 	char buf[256];
   3251 
   3252 	reg = CSR_READ(sc, WMREG_WUS);
   3253 	if (reg != 0) {
   3254 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3255 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3256 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3257 	}
   3258 
   3259 	if (sc->sc_type >= WM_T_PCH2)
   3260 		wm_resume_workarounds_pchlan(sc);
   3261 	if ((ifp->if_flags & IFF_UP) == 0) {
   3262 		wm_reset(sc);
   3263 		/* Non-AMT based hardware can now take control from firmware */
   3264 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3265 			wm_get_hw_control(sc);
   3266 		wm_init_manageability(sc);
   3267 	} else {
   3268 		/*
   3269 		 * We called pmf_class_network_register(), so if_init() is
    3270 		 * automatically called when IFF_UP is set. wm_reset(),
   3271 		 * wm_get_hw_control() and wm_init_manageability() are called
   3272 		 * via wm_init().
   3273 		 */
   3274 	}
   3275 
   3276 	return true;
   3277 }
   3278 
   3279 /*
   3280  * wm_watchdog:		[ifnet interface function]
   3281  *
   3282  *	Watchdog timer handler.
   3283  */
   3284 static void
   3285 wm_watchdog(struct ifnet *ifp)
   3286 {
   3287 	int qid;
   3288 	struct wm_softc *sc = ifp->if_softc;
    3289 	uint16_t hang_queue = 0; /* Bitmask; wm(4)'s max queue count is 16 (82576). */
   3290 
   3291 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3292 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3293 
   3294 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3295 	}
   3296 
    3297 	/* If any queue hung up, reset the interface. */
   3298 	if (hang_queue != 0) {
   3299 		(void)wm_init(ifp);
   3300 
   3301 		/*
    3302 		 * There is still some upper-layer processing that calls
    3303 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3304 		 */
   3305 		/* Try to get more packets going. */
   3306 		ifp->if_start(ifp);
   3307 	}
   3308 }
   3309 
   3310 
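         /*
          * wm_watchdog_txq:
          *
          *	Per-queue watchdog check; takes txq_lock and defers to
          *	wm_watchdog_txq_locked() if the queue has been quiet too long.
          */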
   3311 static void
   3312 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3313 {
   3314 
   3315 	mutex_enter(txq->txq_lock);
   3316 	if (txq->txq_sending &&
   3317 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3318 		wm_watchdog_txq_locked(ifp, txq, hang);
   3319 
   3320 	mutex_exit(txq->txq_lock);
   3321 }
   3322 
   3323 static void
   3324 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3325     uint16_t *hang)
   3326 {
   3327 	struct wm_softc *sc = ifp->if_softc;
   3328 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3329 
   3330 	KASSERT(mutex_owned(txq->txq_lock));
   3331 
   3332 	/*
   3333 	 * Since we're using delayed interrupts, sweep up
   3334 	 * before we report an error.
   3335 	 */
   3336 	wm_txeof(txq, UINT_MAX);
   3337 
   3338 	if (txq->txq_sending)
   3339 		*hang |= __BIT(wmq->wmq_id);
   3340 
   3341 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3342 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3343 		    device_xname(sc->sc_dev));
   3344 	} else {
   3345 #ifdef WM_DEBUG
   3346 		int i, j;
   3347 		struct wm_txsoft *txs;
   3348 #endif
   3349 		log(LOG_ERR,
   3350 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3351 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3352 		    txq->txq_next);
   3353 		if_statinc(ifp, if_oerrors);
   3354 #ifdef WM_DEBUG
   3355 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3356 		    i = WM_NEXTTXS(txq, i)) {
   3357 			txs = &txq->txq_soft[i];
   3358 			printf("txs %d tx %d -> %d\n",
   3359 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3360 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3361 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3362 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3363 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3364 					printf("\t %#08x%08x\n",
   3365 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3366 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3367 				} else {
   3368 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3369 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3370 					    txq->txq_descs[j].wtx_addr.wa_low);
   3371 					printf("\t %#04x%02x%02x%08x\n",
   3372 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3373 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3374 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3375 					    txq->txq_descs[j].wtx_cmdlen);
   3376 				}
   3377 				if (j == txs->txs_lastdesc)
   3378 					break;
   3379 			}
   3380 		}
   3381 #endif
   3382 	}
   3383 }
   3384 
   3385 /*
   3386  * wm_tick:
   3387  *
   3388  *	One second timer, used to check link status, sweep up
   3389  *	completed transmit jobs, etc.
   3390  */
   3391 static void
   3392 wm_tick(void *arg)
   3393 {
   3394 	struct wm_softc *sc = arg;
   3395 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3396 #ifndef WM_MPSAFE
   3397 	int s = splnet();
   3398 #endif
   3399 
   3400 	WM_CORE_LOCK(sc);
   3401 
   3402 	if (sc->sc_core_stopping) {
   3403 		WM_CORE_UNLOCK(sc);
   3404 #ifndef WM_MPSAFE
   3405 		splx(s);
   3406 #endif
   3407 		return;
   3408 	}
   3409 
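         	/*
         	 * The statistics registers read below are clear-on-read, so
         	 * each read returns the count accumulated since the last tick.
         	 */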
   3410 	if (sc->sc_type >= WM_T_82542_2_1) {
   3411 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3412 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3413 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3414 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3415 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3416 	}
   3417 
   3418 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3419 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3420 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3421 	    + CSR_READ(sc, WMREG_CRCERRS)
   3422 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3423 	    + CSR_READ(sc, WMREG_SYMERRC)
   3424 	    + CSR_READ(sc, WMREG_RXERRC)
   3425 	    + CSR_READ(sc, WMREG_SEC)
   3426 	    + CSR_READ(sc, WMREG_CEXTERR)
   3427 	    + CSR_READ(sc, WMREG_RLEC));
   3428 	/*
    3429 	 * WMREG_RNBC is incremented when no buffers are available in host
    3430 	 * memory. It is not the number of dropped packets, because the
    3431 	 * Ethernet controller can still receive packets in that case if
    3432 	 * there is space in the PHY's FIFO.
    3433 	 *
    3434 	 * To count WMREG_RNBC itself, use a dedicated EVCNT instead of
    3435 	 * if_iqdrops.
   3436 	 */
   3437 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3438 	IF_STAT_PUTREF(ifp);
   3439 
   3440 	if (sc->sc_flags & WM_F_HAS_MII)
   3441 		mii_tick(&sc->sc_mii);
   3442 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3443 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3444 		wm_serdes_tick(sc);
   3445 	else
   3446 		wm_tbi_tick(sc);
   3447 
   3448 	WM_CORE_UNLOCK(sc);
   3449 
   3450 	wm_watchdog(ifp);
   3451 
   3452 	callout_schedule(&sc->sc_tick_ch, hz);
   3453 }
   3454 
   3455 static int
   3456 wm_ifflags_cb(struct ethercom *ec)
   3457 {
   3458 	struct ifnet *ifp = &ec->ec_if;
   3459 	struct wm_softc *sc = ifp->if_softc;
   3460 	u_short iffchange;
   3461 	int ecchange;
   3462 	bool needreset = false;
   3463 	int rc = 0;
   3464 
   3465 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3466 		device_xname(sc->sc_dev), __func__));
   3467 
   3468 	WM_CORE_LOCK(sc);
   3469 
   3470 	/*
   3471 	 * Check for if_flags.
    3472 	 * The main use is to prevent link down when opening bpf.
   3473 	 */
   3474 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3475 	sc->sc_if_flags = ifp->if_flags;
   3476 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3477 		needreset = true;
   3478 		goto ec;
   3479 	}
   3480 
   3481 	/* iff related updates */
   3482 	if ((iffchange & IFF_PROMISC) != 0)
   3483 		wm_set_filter(sc);
   3484 
   3485 	wm_set_vlan(sc);
   3486 
   3487 ec:
   3488 	/* Check for ec_capenable. */
   3489 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3490 	sc->sc_ec_capenable = ec->ec_capenable;
   3491 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3492 		needreset = true;
   3493 		goto out;
   3494 	}
   3495 
   3496 	/* ec related updates */
   3497 	wm_set_eee(sc);
   3498 
   3499 out:
   3500 	if (needreset)
   3501 		rc = ENETRESET;
   3502 	WM_CORE_UNLOCK(sc);
   3503 
   3504 	return rc;
   3505 }
   3506 
   3507 static bool
   3508 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3509 {
   3510 
   3511 	switch (sc->sc_phytype) {
   3512 	case WMPHY_82577: /* ihphy */
   3513 	case WMPHY_82578: /* atphy */
   3514 	case WMPHY_82579: /* ihphy */
   3515 	case WMPHY_I217: /* ihphy */
   3516 	case WMPHY_82580: /* ihphy */
   3517 	case WMPHY_I350: /* ihphy */
   3518 		return true;
   3519 	default:
   3520 		return false;
   3521 	}
   3522 }
   3523 
   3524 static void
   3525 wm_set_linkdown_discard(struct wm_softc *sc)
   3526 {
   3527 
   3528 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3529 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3530 
   3531 		mutex_enter(txq->txq_lock);
   3532 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3533 		mutex_exit(txq->txq_lock);
   3534 	}
   3535 }
   3536 
   3537 static void
   3538 wm_clear_linkdown_discard(struct wm_softc *sc)
   3539 {
   3540 
   3541 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3542 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3543 
   3544 		mutex_enter(txq->txq_lock);
   3545 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3546 		mutex_exit(txq->txq_lock);
   3547 	}
   3548 }
   3549 
   3550 /*
   3551  * wm_ioctl:		[ifnet interface function]
   3552  *
   3553  *	Handle control requests from the operator.
   3554  */
   3555 static int
   3556 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3557 {
   3558 	struct wm_softc *sc = ifp->if_softc;
   3559 	struct ifreq *ifr = (struct ifreq *)data;
   3560 	struct ifaddr *ifa = (struct ifaddr *)data;
   3561 	struct sockaddr_dl *sdl;
   3562 	int s, error;
   3563 
   3564 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3565 		device_xname(sc->sc_dev), __func__));
   3566 
   3567 #ifndef WM_MPSAFE
   3568 	s = splnet();
   3569 #endif
   3570 	switch (cmd) {
   3571 	case SIOCSIFMEDIA:
   3572 		WM_CORE_LOCK(sc);
   3573 		/* Flow control requires full-duplex mode. */
   3574 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3575 		    (ifr->ifr_media & IFM_FDX) == 0)
   3576 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3577 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3578 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3579 				/* We can do both TXPAUSE and RXPAUSE. */
   3580 				ifr->ifr_media |=
   3581 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3582 			}
   3583 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3584 		}
   3585 		WM_CORE_UNLOCK(sc);
   3586 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3587 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3588 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
   3589 				wm_set_linkdown_discard(sc);
   3590 			else
   3591 				wm_clear_linkdown_discard(sc);
   3592 		}
   3593 		break;
   3594 	case SIOCINITIFADDR:
   3595 		WM_CORE_LOCK(sc);
   3596 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3597 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3598 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3599 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3600 			/* Unicast address is the first multicast entry */
   3601 			wm_set_filter(sc);
   3602 			error = 0;
   3603 			WM_CORE_UNLOCK(sc);
   3604 			break;
   3605 		}
   3606 		WM_CORE_UNLOCK(sc);
   3607 		if (((ifp->if_flags & IFF_UP) == 0) && wm_phy_need_linkdown_discard(sc))
   3608 			wm_clear_linkdown_discard(sc);
   3609 		/*FALLTHROUGH*/
   3610 	default:
   3611 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   3612 			if (((ifp->if_flags & IFF_UP) == 0) && ((ifr->ifr_flags & IFF_UP) != 0)) {
   3613 				wm_clear_linkdown_discard(sc);
   3614 			} else if (((ifp->if_flags & IFF_UP) != 0) && ((ifr->ifr_flags & IFF_UP) == 0)) {
   3615 				wm_set_linkdown_discard(sc);
   3616 			}
   3617 		}
   3618 #ifdef WM_MPSAFE
   3619 		s = splnet();
   3620 #endif
   3621 		/* It may call wm_start, so unlock here */
   3622 		error = ether_ioctl(ifp, cmd, data);
   3623 #ifdef WM_MPSAFE
   3624 		splx(s);
   3625 #endif
   3626 		if (error != ENETRESET)
   3627 			break;
   3628 
   3629 		error = 0;
   3630 
   3631 		if (cmd == SIOCSIFCAP)
   3632 			error = (*ifp->if_init)(ifp);
   3633 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3634 			;
   3635 		else if (ifp->if_flags & IFF_RUNNING) {
   3636 			/*
   3637 			 * Multicast list has changed; set the hardware filter
   3638 			 * accordingly.
   3639 			 */
   3640 			WM_CORE_LOCK(sc);
   3641 			wm_set_filter(sc);
   3642 			WM_CORE_UNLOCK(sc);
   3643 		}
   3644 		break;
   3645 	}
   3646 
   3647 #ifndef WM_MPSAFE
   3648 	splx(s);
   3649 #endif
   3650 	return error;
   3651 }
   3652 
   3653 /* MAC address related */
   3654 
   3655 /*
    3656  * Get the offset of the alternative MAC address and return it.
    3657  * On error, return offset 0.
   3658  */
   3659 static uint16_t
   3660 wm_check_alt_mac_addr(struct wm_softc *sc)
   3661 {
   3662 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3663 	uint16_t offset = NVM_OFF_MACADDR;
   3664 
   3665 	/* Try to read alternative MAC address pointer */
   3666 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3667 		return 0;
   3668 
    3669 	/* Check whether the pointer is valid. */
   3670 	if ((offset == 0x0000) || (offset == 0xffff))
   3671 		return 0;
   3672 
   3673 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3674 	/*
    3675 	 * Check whether the alternative MAC address is valid.
    3676 	 * Some cards have a non-0xffff pointer but don't actually use
    3677 	 * an alternative MAC address.
    3678 	 *
    3679 	 * The test is whether the broadcast bit is set.
   3680 	 */
   3681 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3682 		if (((myea[0] & 0xff) & 0x01) == 0)
   3683 			return offset; /* Found */
   3684 
   3685 	/* Not found */
   3686 	return 0;
   3687 }
   3688 
   3689 static int
   3690 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3691 {
   3692 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3693 	uint16_t offset = NVM_OFF_MACADDR;
   3694 	int do_invert = 0;
   3695 
   3696 	switch (sc->sc_type) {
   3697 	case WM_T_82580:
   3698 	case WM_T_I350:
   3699 	case WM_T_I354:
   3700 		/* EEPROM Top Level Partitioning */
   3701 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3702 		break;
   3703 	case WM_T_82571:
   3704 	case WM_T_82575:
   3705 	case WM_T_82576:
   3706 	case WM_T_80003:
   3707 	case WM_T_I210:
   3708 	case WM_T_I211:
   3709 		offset = wm_check_alt_mac_addr(sc);
   3710 		if (offset == 0)
   3711 			if ((sc->sc_funcid & 0x01) == 1)
   3712 				do_invert = 1;
   3713 		break;
   3714 	default:
   3715 		if ((sc->sc_funcid & 0x01) == 1)
   3716 			do_invert = 1;
   3717 		break;
   3718 	}
   3719 
   3720 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3721 		goto bad;
   3722 
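         	/*
         	 * Each 16-bit NVM word holds two address bytes, low byte
         	 * first: e.g. myea[0] == 0x2301 gives enaddr[0] = 0x01 and
         	 * enaddr[1] = 0x23.
         	 */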
   3723 	enaddr[0] = myea[0] & 0xff;
   3724 	enaddr[1] = myea[0] >> 8;
   3725 	enaddr[2] = myea[1] & 0xff;
   3726 	enaddr[3] = myea[1] >> 8;
   3727 	enaddr[4] = myea[2] & 0xff;
   3728 	enaddr[5] = myea[2] >> 8;
   3729 
   3730 	/*
   3731 	 * Toggle the LSB of the MAC address on the second port
   3732 	 * of some dual port cards.
   3733 	 */
   3734 	if (do_invert != 0)
   3735 		enaddr[5] ^= 1;
   3736 
   3737 	return 0;
   3738 
   3739  bad:
   3740 	return -1;
   3741 }
   3742 
   3743 /*
   3744  * wm_set_ral:
   3745  *
    3746  *	Set an entry in the receive address list.
   3747  */
   3748 static void
   3749 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3750 {
   3751 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3752 	uint32_t wlock_mac;
   3753 	int rv;
   3754 
   3755 	if (enaddr != NULL) {
   3756 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3757 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3758 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3759 		ral_hi |= RAL_AV;
   3760 	} else {
   3761 		ral_lo = 0;
   3762 		ral_hi = 0;
   3763 	}
   3764 
   3765 	switch (sc->sc_type) {
   3766 	case WM_T_82542_2_0:
   3767 	case WM_T_82542_2_1:
   3768 	case WM_T_82543:
   3769 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3770 		CSR_WRITE_FLUSH(sc);
   3771 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3772 		CSR_WRITE_FLUSH(sc);
   3773 		break;
   3774 	case WM_T_PCH2:
   3775 	case WM_T_PCH_LPT:
   3776 	case WM_T_PCH_SPT:
   3777 	case WM_T_PCH_CNP:
   3778 		if (idx == 0) {
   3779 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3780 			CSR_WRITE_FLUSH(sc);
   3781 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3782 			CSR_WRITE_FLUSH(sc);
   3783 			return;
   3784 		}
   3785 		if (sc->sc_type != WM_T_PCH2) {
   3786 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3787 			    FWSM_WLOCK_MAC);
   3788 			addrl = WMREG_SHRAL(idx - 1);
   3789 			addrh = WMREG_SHRAH(idx - 1);
   3790 		} else {
   3791 			wlock_mac = 0;
   3792 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3793 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3794 		}
   3795 
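         		/*
         		 * FWSM_WLOCK_MAC limits how many shared receive address
         		 * (SHRA) registers software may update; zero means no
         		 * limit.
         		 */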
   3796 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3797 			rv = wm_get_swflag_ich8lan(sc);
   3798 			if (rv != 0)
   3799 				return;
   3800 			CSR_WRITE(sc, addrl, ral_lo);
   3801 			CSR_WRITE_FLUSH(sc);
   3802 			CSR_WRITE(sc, addrh, ral_hi);
   3803 			CSR_WRITE_FLUSH(sc);
   3804 			wm_put_swflag_ich8lan(sc);
   3805 		}
   3806 
   3807 		break;
   3808 	default:
   3809 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3810 		CSR_WRITE_FLUSH(sc);
   3811 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3812 		CSR_WRITE_FLUSH(sc);
   3813 		break;
   3814 	}
   3815 }
   3816 
   3817 /*
   3818  * wm_mchash:
   3819  *
   3820  *	Compute the hash of the multicast address for the 4096-bit
   3821  *	multicast filter.
   3822  */
   3823 static uint32_t
   3824 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3825 {
   3826 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3827 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3828 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3829 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3830 	uint32_t hash;
   3831 
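         	/*
         	 * sc_mchash_type selects one of four bit alignments of the
         	 * hash, which is built from the last two bytes of the MAC
         	 * address: 10 bits on ICH/PCH variants, 12 bits elsewhere.
         	 * E.g. with type 0, 01:00:5e:00:00:01 hashes to
         	 * (0x00 >> 4) | (0x01 << 8) = 0x100: MTA word 8, bit 0.
         	 */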
   3832 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3833 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3834 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3835 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3836 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3837 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3838 		return (hash & 0x3ff);
   3839 	}
   3840 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3841 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3842 
   3843 	return (hash & 0xfff);
   3844 }
   3845 
   3846 /*
    3847  * wm_rar_count:
    3848  *	Return the number of entries in the receive address list.
   3849  */
   3850 static int
   3851 wm_rar_count(struct wm_softc *sc)
   3852 {
   3853 	int size;
   3854 
   3855 	switch (sc->sc_type) {
   3856 	case WM_T_ICH8:
    3857 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3858 		break;
   3859 	case WM_T_ICH9:
   3860 	case WM_T_ICH10:
   3861 	case WM_T_PCH:
   3862 		size = WM_RAL_TABSIZE_ICH8;
   3863 		break;
   3864 	case WM_T_PCH2:
   3865 		size = WM_RAL_TABSIZE_PCH2;
   3866 		break;
   3867 	case WM_T_PCH_LPT:
   3868 	case WM_T_PCH_SPT:
   3869 	case WM_T_PCH_CNP:
   3870 		size = WM_RAL_TABSIZE_PCH_LPT;
   3871 		break;
   3872 	case WM_T_82575:
   3873 	case WM_T_I210:
   3874 	case WM_T_I211:
   3875 		size = WM_RAL_TABSIZE_82575;
   3876 		break;
   3877 	case WM_T_82576:
   3878 	case WM_T_82580:
   3879 		size = WM_RAL_TABSIZE_82576;
   3880 		break;
   3881 	case WM_T_I350:
   3882 	case WM_T_I354:
   3883 		size = WM_RAL_TABSIZE_I350;
   3884 		break;
   3885 	default:
   3886 		size = WM_RAL_TABSIZE;
   3887 	}
   3888 
   3889 	return size;
   3890 }
   3891 
   3892 /*
   3893  * wm_set_filter:
   3894  *
   3895  *	Set up the receive filter.
   3896  */
   3897 static void
   3898 wm_set_filter(struct wm_softc *sc)
   3899 {
   3900 	struct ethercom *ec = &sc->sc_ethercom;
   3901 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3902 	struct ether_multi *enm;
   3903 	struct ether_multistep step;
   3904 	bus_addr_t mta_reg;
   3905 	uint32_t hash, reg, bit;
   3906 	int i, size, ralmax, rv;
   3907 
   3908 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3909 		device_xname(sc->sc_dev), __func__));
   3910 
   3911 	if (sc->sc_type >= WM_T_82544)
   3912 		mta_reg = WMREG_CORDOVA_MTA;
   3913 	else
   3914 		mta_reg = WMREG_MTA;
   3915 
   3916 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3917 
   3918 	if (ifp->if_flags & IFF_BROADCAST)
   3919 		sc->sc_rctl |= RCTL_BAM;
   3920 	if (ifp->if_flags & IFF_PROMISC) {
   3921 		sc->sc_rctl |= RCTL_UPE;
   3922 		ETHER_LOCK(ec);
   3923 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3924 		ETHER_UNLOCK(ec);
   3925 		goto allmulti;
   3926 	}
   3927 
   3928 	/*
   3929 	 * Set the station address in the first RAL slot, and
   3930 	 * clear the remaining slots.
   3931 	 */
   3932 	size = wm_rar_count(sc);
   3933 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3934 
   3935 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3936 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3937 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3938 		switch (i) {
   3939 		case 0:
   3940 			/* We can use all entries */
   3941 			ralmax = size;
   3942 			break;
   3943 		case 1:
   3944 			/* Only RAR[0] */
   3945 			ralmax = 1;
   3946 			break;
   3947 		default:
   3948 			/* Available SHRA + RAR[0] */
   3949 			ralmax = i + 1;
   3950 		}
   3951 	} else
   3952 		ralmax = size;
   3953 	for (i = 1; i < size; i++) {
   3954 		if (i < ralmax)
   3955 			wm_set_ral(sc, NULL, i);
   3956 	}
   3957 
   3958 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3959 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3960 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3961 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3962 		size = WM_ICH8_MC_TABSIZE;
   3963 	else
   3964 		size = WM_MC_TABSIZE;
   3965 	/* Clear out the multicast table. */
   3966 	for (i = 0; i < size; i++) {
   3967 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3968 		CSR_WRITE_FLUSH(sc);
   3969 	}
   3970 
   3971 	ETHER_LOCK(ec);
   3972 	ETHER_FIRST_MULTI(step, ec, enm);
   3973 	while (enm != NULL) {
   3974 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3975 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3976 			ETHER_UNLOCK(ec);
   3977 			/*
   3978 			 * We must listen to a range of multicast addresses.
   3979 			 * For now, just accept all multicasts, rather than
   3980 			 * trying to set only those filter bits needed to match
   3981 			 * the range.  (At this time, the only use of address
   3982 			 * ranges is for IP multicast routing, for which the
   3983 			 * range is big enough to require all bits set.)
   3984 			 */
   3985 			goto allmulti;
   3986 		}
   3987 
   3988 		hash = wm_mchash(sc, enm->enm_addrlo);
   3989 
   3990 		reg = (hash >> 5);
   3991 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3992 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3993 		    || (sc->sc_type == WM_T_PCH2)
   3994 		    || (sc->sc_type == WM_T_PCH_LPT)
   3995 		    || (sc->sc_type == WM_T_PCH_SPT)
   3996 		    || (sc->sc_type == WM_T_PCH_CNP))
   3997 			reg &= 0x1f;
   3998 		else
   3999 			reg &= 0x7f;
   4000 		bit = hash & 0x1f;
   4001 
   4002 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4003 		hash |= 1U << bit;
   4004 
   4005 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4006 			/*
    4007 			 * 82544 Errata 9: Certain registers cannot be
    4008 			 * written with particular alignments in PCI-X bus
    4009 			 * operation (FCAH, MTA and VFTA).
   4010 			 */
   4011 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4012 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4013 			CSR_WRITE_FLUSH(sc);
   4014 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4015 			CSR_WRITE_FLUSH(sc);
   4016 		} else {
   4017 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4018 			CSR_WRITE_FLUSH(sc);
   4019 		}
   4020 
   4021 		ETHER_NEXT_MULTI(step, enm);
   4022 	}
   4023 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4024 	ETHER_UNLOCK(ec);
   4025 
   4026 	goto setit;
   4027 
   4028  allmulti:
   4029 	sc->sc_rctl |= RCTL_MPE;
   4030 
   4031  setit:
   4032 	if (sc->sc_type >= WM_T_PCH2) {
   4033 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4034 		    && (ifp->if_mtu > ETHERMTU))
   4035 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4036 		else
   4037 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4038 		if (rv != 0)
   4039 			device_printf(sc->sc_dev,
   4040 			    "Failed to do workaround for jumbo frame.\n");
   4041 	}
   4042 
   4043 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4044 }
   4045 
   4046 /* Reset and init related */
   4047 
   4048 static void
   4049 wm_set_vlan(struct wm_softc *sc)
   4050 {
   4051 
   4052 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4053 		device_xname(sc->sc_dev), __func__));
   4054 
   4055 	/* Deal with VLAN enables. */
   4056 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4057 		sc->sc_ctrl |= CTRL_VME;
   4058 	else
   4059 		sc->sc_ctrl &= ~CTRL_VME;
   4060 
   4061 	/* Write the control registers. */
   4062 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4063 }
   4064 
   4065 static void
   4066 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4067 {
   4068 	uint32_t gcr;
   4069 	pcireg_t ctrl2;
   4070 
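         	/*
         	 * If the completion timeout is at its default of 0, set it
         	 * to 10ms via GCR, or via the PCIe Device Control 2 register
         	 * when capability version 2 is present; in either case also
         	 * disable completion timeout resend.
         	 */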
   4071 	gcr = CSR_READ(sc, WMREG_GCR);
   4072 
    4073 	/* Only take action if the timeout value is at its default of 0 */
   4074 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4075 		goto out;
   4076 
   4077 	if ((gcr & GCR_CAP_VER2) == 0) {
   4078 		gcr |= GCR_CMPL_TMOUT_10MS;
   4079 		goto out;
   4080 	}
   4081 
   4082 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4083 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4084 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4085 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4086 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4087 
   4088 out:
   4089 	/* Disable completion timeout resend */
   4090 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4091 
   4092 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4093 }
   4094 
   4095 void
   4096 wm_get_auto_rd_done(struct wm_softc *sc)
   4097 {
   4098 	int i;
   4099 
    4100 	/* Wait for eeprom to reload */
   4101 	switch (sc->sc_type) {
   4102 	case WM_T_82571:
   4103 	case WM_T_82572:
   4104 	case WM_T_82573:
   4105 	case WM_T_82574:
   4106 	case WM_T_82583:
   4107 	case WM_T_82575:
   4108 	case WM_T_82576:
   4109 	case WM_T_82580:
   4110 	case WM_T_I350:
   4111 	case WM_T_I354:
   4112 	case WM_T_I210:
   4113 	case WM_T_I211:
   4114 	case WM_T_80003:
   4115 	case WM_T_ICH8:
   4116 	case WM_T_ICH9:
   4117 		for (i = 0; i < 10; i++) {
   4118 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4119 				break;
   4120 			delay(1000);
   4121 		}
   4122 		if (i == 10) {
   4123 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4124 			    "complete\n", device_xname(sc->sc_dev));
   4125 		}
   4126 		break;
   4127 	default:
   4128 		break;
   4129 	}
   4130 }
   4131 
   4132 void
   4133 wm_lan_init_done(struct wm_softc *sc)
   4134 {
   4135 	uint32_t reg = 0;
   4136 	int i;
   4137 
   4138 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4139 		device_xname(sc->sc_dev), __func__));
   4140 
   4141 	/* Wait for eeprom to reload */
   4142 	switch (sc->sc_type) {
   4143 	case WM_T_ICH10:
   4144 	case WM_T_PCH:
   4145 	case WM_T_PCH2:
   4146 	case WM_T_PCH_LPT:
   4147 	case WM_T_PCH_SPT:
   4148 	case WM_T_PCH_CNP:
   4149 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4150 			reg = CSR_READ(sc, WMREG_STATUS);
   4151 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4152 				break;
   4153 			delay(100);
   4154 		}
   4155 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4156 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4157 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4158 		}
   4159 		break;
   4160 	default:
   4161 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4162 		    __func__);
   4163 		break;
   4164 	}
   4165 
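         	/* Acknowledge the indication by clearing the bit. */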
   4166 	reg &= ~STATUS_LAN_INIT_DONE;
   4167 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4168 }
   4169 
   4170 void
   4171 wm_get_cfg_done(struct wm_softc *sc)
   4172 {
   4173 	int mask;
   4174 	uint32_t reg;
   4175 	int i;
   4176 
   4177 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4178 		device_xname(sc->sc_dev), __func__));
   4179 
   4180 	/* Wait for eeprom to reload */
   4181 	switch (sc->sc_type) {
   4182 	case WM_T_82542_2_0:
   4183 	case WM_T_82542_2_1:
   4184 		/* null */
   4185 		break;
   4186 	case WM_T_82543:
   4187 	case WM_T_82544:
   4188 	case WM_T_82540:
   4189 	case WM_T_82545:
   4190 	case WM_T_82545_3:
   4191 	case WM_T_82546:
   4192 	case WM_T_82546_3:
   4193 	case WM_T_82541:
   4194 	case WM_T_82541_2:
   4195 	case WM_T_82547:
   4196 	case WM_T_82547_2:
   4197 	case WM_T_82573:
   4198 	case WM_T_82574:
   4199 	case WM_T_82583:
   4200 		/* generic */
   4201 		delay(10*1000);
   4202 		break;
   4203 	case WM_T_80003:
   4204 	case WM_T_82571:
   4205 	case WM_T_82572:
   4206 	case WM_T_82575:
   4207 	case WM_T_82576:
   4208 	case WM_T_82580:
   4209 	case WM_T_I350:
   4210 	case WM_T_I354:
   4211 	case WM_T_I210:
   4212 	case WM_T_I211:
   4213 		if (sc->sc_type == WM_T_82571) {
   4214 			/* Only 82571 shares port 0 */
   4215 			mask = EEMNGCTL_CFGDONE_0;
   4216 		} else
   4217 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4218 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4219 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4220 				break;
   4221 			delay(1000);
   4222 		}
   4223 		if (i >= WM_PHY_CFG_TIMEOUT)
   4224 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4225 				device_xname(sc->sc_dev), __func__));
   4226 		break;
   4227 	case WM_T_ICH8:
   4228 	case WM_T_ICH9:
   4229 	case WM_T_ICH10:
   4230 	case WM_T_PCH:
   4231 	case WM_T_PCH2:
   4232 	case WM_T_PCH_LPT:
   4233 	case WM_T_PCH_SPT:
   4234 	case WM_T_PCH_CNP:
   4235 		delay(10*1000);
   4236 		if (sc->sc_type >= WM_T_ICH10)
   4237 			wm_lan_init_done(sc);
   4238 		else
   4239 			wm_get_auto_rd_done(sc);
   4240 
   4241 		/* Clear PHY Reset Asserted bit */
   4242 		reg = CSR_READ(sc, WMREG_STATUS);
   4243 		if ((reg & STATUS_PHYRA) != 0)
   4244 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4245 		break;
   4246 	default:
   4247 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4248 		    __func__);
   4249 		break;
   4250 	}
   4251 }
   4252 
   4253 int
   4254 wm_phy_post_reset(struct wm_softc *sc)
   4255 {
   4256 	device_t dev = sc->sc_dev;
   4257 	uint16_t reg;
   4258 	int rv = 0;
   4259 
   4260 	/* This function is only for ICH8 and newer. */
   4261 	if (sc->sc_type < WM_T_ICH8)
   4262 		return 0;
   4263 
   4264 	if (wm_phy_resetisblocked(sc)) {
   4265 		/* XXX */
   4266 		device_printf(dev, "PHY is blocked\n");
   4267 		return -1;
   4268 	}
   4269 
   4270 	/* Allow time for h/w to get to quiescent state after reset */
   4271 	delay(10*1000);
   4272 
   4273 	/* Perform any necessary post-reset workarounds */
   4274 	if (sc->sc_type == WM_T_PCH)
   4275 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4276 	else if (sc->sc_type == WM_T_PCH2)
   4277 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4278 	if (rv != 0)
   4279 		return rv;
   4280 
   4281 	/* Clear the host wakeup bit after lcd reset */
   4282 	if (sc->sc_type >= WM_T_PCH) {
   4283 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4284 		reg &= ~BM_WUC_HOST_WU_BIT;
   4285 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4286 	}
   4287 
   4288 	/* Configure the LCD with the extended configuration region in NVM */
   4289 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4290 		return rv;
   4291 
   4292 	/* Configure the LCD with the OEM bits in NVM */
   4293 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4294 
   4295 	if (sc->sc_type == WM_T_PCH2) {
   4296 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4297 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4298 			delay(10 * 1000);
   4299 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4300 		}
   4301 		/* Set EEE LPI Update Timer to 200usec */
   4302 		rv = sc->phy.acquire(sc);
   4303 		if (rv)
   4304 			return rv;
   4305 		rv = wm_write_emi_reg_locked(dev,
   4306 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4307 		sc->phy.release(sc);
   4308 	}
   4309 
   4310 	return rv;
   4311 }
   4312 
   4313 /* Only for PCH and newer */
   4314 static int
   4315 wm_write_smbus_addr(struct wm_softc *sc)
   4316 {
   4317 	uint32_t strap, freq;
   4318 	uint16_t phy_data;
   4319 	int rv;
   4320 
   4321 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4322 		device_xname(sc->sc_dev), __func__));
   4323 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4324 
   4325 	strap = CSR_READ(sc, WMREG_STRAP);
   4326 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4327 
   4328 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4329 	if (rv != 0)
   4330 		return -1;
   4331 
   4332 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4333 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4334 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4335 
   4336 	if (sc->sc_phytype == WMPHY_I217) {
   4337 		/* Restore SMBus frequency */
    4338 		if (freq--) {
   4339 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4340 			    | HV_SMB_ADDR_FREQ_HIGH);
   4341 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4342 			    HV_SMB_ADDR_FREQ_LOW);
   4343 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4344 			    HV_SMB_ADDR_FREQ_HIGH);
   4345 		} else
   4346 			DPRINTF(sc, WM_DEBUG_INIT,
   4347 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4348 				device_xname(sc->sc_dev), __func__));
   4349 	}
   4350 
   4351 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4352 	    phy_data);
   4353 }
   4354 
   4355 static int
   4356 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4357 {
   4358 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4359 	uint16_t phy_page = 0;
   4360 	int rv = 0;
   4361 
   4362 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4363 		device_xname(sc->sc_dev), __func__));
   4364 
   4365 	switch (sc->sc_type) {
   4366 	case WM_T_ICH8:
   4367 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4368 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4369 			return 0;
   4370 
   4371 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4372 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4373 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4374 			break;
   4375 		}
   4376 		/* FALLTHROUGH */
   4377 	case WM_T_PCH:
   4378 	case WM_T_PCH2:
   4379 	case WM_T_PCH_LPT:
   4380 	case WM_T_PCH_SPT:
   4381 	case WM_T_PCH_CNP:
   4382 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4383 		break;
   4384 	default:
   4385 		return 0;
   4386 	}
   4387 
   4388 	if ((rv = sc->phy.acquire(sc)) != 0)
   4389 		return rv;
   4390 
   4391 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4392 	if ((reg & sw_cfg_mask) == 0)
   4393 		goto release;
   4394 
   4395 	/*
   4396 	 * Make sure HW does not configure LCD from PHY extended configuration
   4397 	 * before SW configuration
   4398 	 */
   4399 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4400 	if ((sc->sc_type < WM_T_PCH2)
   4401 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4402 		goto release;
   4403 
   4404 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4405 		device_xname(sc->sc_dev), __func__));
    4406 	/* The NVM pointer is in DWORDs; convert it to a word address. */
   4407 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4408 
   4409 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4410 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4411 	if (cnf_size == 0)
   4412 		goto release;
   4413 
   4414 	if (((sc->sc_type == WM_T_PCH)
   4415 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4416 	    || (sc->sc_type > WM_T_PCH)) {
   4417 		/*
   4418 		 * HW configures the SMBus address and LEDs when the OEM and
   4419 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4420 		 * are cleared, SW will configure them instead.
   4421 		 */
   4422 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4423 			device_xname(sc->sc_dev), __func__));
   4424 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4425 			goto release;
   4426 
   4427 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4428 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4429 		    (uint16_t)reg);
   4430 		if (rv != 0)
   4431 			goto release;
   4432 	}
   4433 
   4434 	/* Configure LCD from extended configuration region. */
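         	/* Each entry is a (data word, register address word) pair. */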
   4435 	for (i = 0; i < cnf_size; i++) {
   4436 		uint16_t reg_data, reg_addr;
   4437 
   4438 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4439 			goto release;
   4440 
    4441 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4442 			goto release;
   4443 
   4444 		if (reg_addr == IGPHY_PAGE_SELECT)
   4445 			phy_page = reg_data;
   4446 
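         		/*
         		 * Combine the page (upper bits, from the page-select
         		 * word captured above) with the in-page register
         		 * offset (low bits).
         		 */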
   4447 		reg_addr &= IGPHY_MAXREGADDR;
   4448 		reg_addr |= phy_page;
   4449 
   4450 		KASSERT(sc->phy.writereg_locked != NULL);
   4451 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4452 		    reg_data);
   4453 	}
   4454 
   4455 release:
   4456 	sc->phy.release(sc);
   4457 	return rv;
   4458 }
   4459 
   4460 /*
   4461  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4462  *  @sc:       pointer to the HW structure
   4463  *  @d0_state: boolean if entering d0 or d3 device state
   4464  *
   4465  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4466  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4467  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4468  */
   4469 int
   4470 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4471 {
   4472 	uint32_t mac_reg;
   4473 	uint16_t oem_reg;
   4474 	int rv;
   4475 
   4476 	if (sc->sc_type < WM_T_PCH)
   4477 		return 0;
   4478 
   4479 	rv = sc->phy.acquire(sc);
   4480 	if (rv != 0)
   4481 		return rv;
   4482 
   4483 	if (sc->sc_type == WM_T_PCH) {
   4484 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4485 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4486 			goto release;
   4487 	}
   4488 
   4489 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4490 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4491 		goto release;
   4492 
   4493 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4494 
   4495 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4496 	if (rv != 0)
   4497 		goto release;
   4498 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4499 
   4500 	if (d0_state) {
   4501 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4502 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4503 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4504 			oem_reg |= HV_OEM_BITS_LPLU;
   4505 	} else {
   4506 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4507 		    != 0)
   4508 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4509 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4510 		    != 0)
   4511 			oem_reg |= HV_OEM_BITS_LPLU;
   4512 	}
   4513 
   4514 	/* Set Restart auto-neg to activate the bits */
   4515 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4516 	    && (wm_phy_resetisblocked(sc) == false))
   4517 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4518 
   4519 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4520 
   4521 release:
   4522 	sc->phy.release(sc);
   4523 
   4524 	return rv;
   4525 }
   4526 
   4527 /* Init hardware bits */
   4528 void
   4529 wm_initialize_hardware_bits(struct wm_softc *sc)
   4530 {
   4531 	uint32_t tarc0, tarc1, reg;
   4532 
   4533 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4534 		device_xname(sc->sc_dev), __func__));
   4535 
   4536 	/* For 82571 variant, 80003 and ICHs */
   4537 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4538 	    || (sc->sc_type >= WM_T_80003)) {
   4539 
   4540 		/* Transmit Descriptor Control 0 */
   4541 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4542 		reg |= TXDCTL_COUNT_DESC;
   4543 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4544 
   4545 		/* Transmit Descriptor Control 1 */
   4546 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4547 		reg |= TXDCTL_COUNT_DESC;
   4548 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4549 
   4550 		/* TARC0 */
   4551 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4552 		switch (sc->sc_type) {
   4553 		case WM_T_82571:
   4554 		case WM_T_82572:
   4555 		case WM_T_82573:
   4556 		case WM_T_82574:
   4557 		case WM_T_82583:
   4558 		case WM_T_80003:
   4559 			/* Clear bits 30..27 */
   4560 			tarc0 &= ~__BITS(30, 27);
   4561 			break;
   4562 		default:
   4563 			break;
   4564 		}
   4565 
   4566 		switch (sc->sc_type) {
   4567 		case WM_T_82571:
   4568 		case WM_T_82572:
   4569 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4570 
   4571 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4572 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4573 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4574 			/* 8257[12] Errata No.7 */
    4575 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4576 
   4577 			/* TARC1 bit 28 */
   4578 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4579 				tarc1 &= ~__BIT(28);
   4580 			else
   4581 				tarc1 |= __BIT(28);
   4582 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4583 
   4584 			/*
   4585 			 * 8257[12] Errata No.13
    4586 			 * Disable Dynamic Clock Gating.
   4587 			 */
   4588 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4589 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4590 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4591 			break;
   4592 		case WM_T_82573:
   4593 		case WM_T_82574:
   4594 		case WM_T_82583:
   4595 			if ((sc->sc_type == WM_T_82574)
   4596 			    || (sc->sc_type == WM_T_82583))
   4597 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4598 
   4599 			/* Extended Device Control */
   4600 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4601 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4602 			reg |= __BIT(22);	/* Set bit 22 */
   4603 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4604 
   4605 			/* Device Control */
   4606 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4607 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4608 
   4609 			/* PCIe Control Register */
   4610 			/*
   4611 			 * 82573 Errata (unknown).
   4612 			 *
   4613 			 * 82574 Errata 25 and 82583 Errata 12
   4614 			 * "Dropped Rx Packets":
    4615 			 *   Fixed in NVM image version 2.1.4 and newer.
   4616 			 */
   4617 			reg = CSR_READ(sc, WMREG_GCR);
   4618 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4619 			CSR_WRITE(sc, WMREG_GCR, reg);
   4620 
   4621 			if ((sc->sc_type == WM_T_82574)
   4622 			    || (sc->sc_type == WM_T_82583)) {
   4623 				/*
   4624 				 * Document says this bit must be set for
   4625 				 * proper operation.
   4626 				 */
   4627 				reg = CSR_READ(sc, WMREG_GCR);
   4628 				reg |= __BIT(22);
   4629 				CSR_WRITE(sc, WMREG_GCR, reg);
   4630 
    4631 				/*
    4632 				 * Apply a workaround for a hardware erratum
    4633 				 * documented in the errata docs. It fixes an
    4634 				 * issue where error-prone or unreliable PCIe
    4635 				 * completions occur, particularly with ASPM
    4636 				 * enabled. Without the fix, the issue can
    4637 				 * cause Tx timeouts.
    4638 				 */
   4639 				reg = CSR_READ(sc, WMREG_GCR2);
   4640 				reg |= __BIT(0);
   4641 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4642 			}
   4643 			break;
   4644 		case WM_T_80003:
   4645 			/* TARC0 */
   4646 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4647 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4648 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4649 
   4650 			/* TARC1 bit 28 */
   4651 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4652 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4653 				tarc1 &= ~__BIT(28);
   4654 			else
   4655 				tarc1 |= __BIT(28);
   4656 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4657 			break;
   4658 		case WM_T_ICH8:
   4659 		case WM_T_ICH9:
   4660 		case WM_T_ICH10:
   4661 		case WM_T_PCH:
   4662 		case WM_T_PCH2:
   4663 		case WM_T_PCH_LPT:
   4664 		case WM_T_PCH_SPT:
   4665 		case WM_T_PCH_CNP:
   4666 			/* TARC0 */
   4667 			if (sc->sc_type == WM_T_ICH8) {
   4668 				/* Set TARC0 bits 29 and 28 */
   4669 				tarc0 |= __BITS(29, 28);
   4670 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4671 				tarc0 |= __BIT(29);
   4672 				/*
    4673 				 * Drop bit 28. From Linux.
    4674 				 * See the I218/I219 spec update,
   4675 				 * "5. Buffer Overrun While the I219 is
   4676 				 * Processing DMA Transactions"
   4677 				 */
   4678 				tarc0 &= ~__BIT(28);
   4679 			}
   4680 			/* Set TARC0 bits 23,24,26,27 */
   4681 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4682 
   4683 			/* CTRL_EXT */
   4684 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4685 			reg |= __BIT(22);	/* Set bit 22 */
   4686 			/*
   4687 			 * Enable PHY low-power state when MAC is at D3
   4688 			 * w/o WoL
   4689 			 */
   4690 			if (sc->sc_type >= WM_T_PCH)
   4691 				reg |= CTRL_EXT_PHYPDEN;
   4692 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4693 
   4694 			/* TARC1 */
   4695 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4696 			/* bit 28 */
   4697 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4698 				tarc1 &= ~__BIT(28);
   4699 			else
   4700 				tarc1 |= __BIT(28);
   4701 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4702 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4703 
   4704 			/* Device Status */
   4705 			if (sc->sc_type == WM_T_ICH8) {
   4706 				reg = CSR_READ(sc, WMREG_STATUS);
   4707 				reg &= ~__BIT(31);
   4708 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4709 
   4710 			}
   4711 
   4712 			/* IOSFPC */
   4713 			if (sc->sc_type == WM_T_PCH_SPT) {
   4714 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4715 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4716 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4717 			}
   4718 			/*
    4719 			 * To work around a descriptor data corruption issue
    4720 			 * during NFS v2 UDP traffic, just disable the NFS
    4721 			 * filtering capability.
   4722 			 */
   4723 			reg = CSR_READ(sc, WMREG_RFCTL);
   4724 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4725 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4726 			break;
   4727 		default:
   4728 			break;
   4729 		}
   4730 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4731 
   4732 		switch (sc->sc_type) {
   4733 		/*
   4734 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4735 		 * Avoid RSS Hash Value bug.
   4736 		 */
   4737 		case WM_T_82571:
   4738 		case WM_T_82572:
   4739 		case WM_T_82573:
   4740 		case WM_T_80003:
   4741 		case WM_T_ICH8:
   4742 			reg = CSR_READ(sc, WMREG_RFCTL);
    4743 			reg |= WMREG_RFCTL_NEWIPV6EXDIS
         			    | WMREG_RFCTL_IPV6EXDIS;
   4744 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4745 			break;
   4746 		case WM_T_82574:
    4747 			/* Use the extended Rx descriptor. */
   4748 			reg = CSR_READ(sc, WMREG_RFCTL);
   4749 			reg |= WMREG_RFCTL_EXSTEN;
   4750 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4751 			break;
   4752 		default:
   4753 			break;
   4754 		}
   4755 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4756 		/*
   4757 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4758 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4759 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4760 		 * Correctly by the Device"
   4761 		 *
   4762 		 * I354(C2000) Errata AVR53:
   4763 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4764 		 * Hang"
   4765 		 */
   4766 		reg = CSR_READ(sc, WMREG_RFCTL);
   4767 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4768 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4769 	}
   4770 }
   4771 
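         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate the encoded RXPBS size field into the actual packet
          *	buffer size using the 82580 lookup table. Out-of-range values
          *	map to 0.
          */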
   4772 static uint32_t
   4773 wm_rxpbs_adjust_82580(uint32_t val)
   4774 {
   4775 	uint32_t rv = 0;
   4776 
   4777 	if (val < __arraycount(wm_82580_rxpbs_table))
   4778 		rv = wm_82580_rxpbs_table[val];
   4779 
   4780 	return rv;
   4781 }
   4782 
   4783 /*
   4784  * wm_reset_phy:
   4785  *
   4786  *	generic PHY reset function.
   4787  *	Same as e1000_phy_hw_reset_generic()
   4788  */
   4789 static int
   4790 wm_reset_phy(struct wm_softc *sc)
   4791 {
   4792 	uint32_t reg;
   4793 
   4794 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4795 		device_xname(sc->sc_dev), __func__));
   4796 	if (wm_phy_resetisblocked(sc))
   4797 		return -1;
   4798 
   4799 	sc->phy.acquire(sc);
   4800 
   4801 	reg = CSR_READ(sc, WMREG_CTRL);
   4802 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4803 	CSR_WRITE_FLUSH(sc);
   4804 
   4805 	delay(sc->phy.reset_delay_us);
   4806 
   4807 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4808 	CSR_WRITE_FLUSH(sc);
   4809 
   4810 	delay(150);
   4811 
   4812 	sc->phy.release(sc);
   4813 
   4814 	wm_get_cfg_done(sc);
   4815 	wm_phy_post_reset(sc);
   4816 
   4817 	return 0;
   4818 }
   4819 
   4820 /*
    4821  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4822  * so it is enough to check sc->sc_queue[0] only.
   4823  */
   4824 static void
   4825 wm_flush_desc_rings(struct wm_softc *sc)
   4826 {
   4827 	pcireg_t preg;
   4828 	uint32_t reg;
   4829 	struct wm_txqueue *txq;
   4830 	wiseman_txdesc_t *txd;
   4831 	int nexttx;
   4832 	uint32_t rctl;
   4833 
   4834 	/* First, disable MULR fix in FEXTNVM11 */
   4835 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4836 	reg |= FEXTNVM11_DIS_MULRFIX;
   4837 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4838 
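         	/*
         	 * Flush only when the hardware reports a pending flush request
         	 * and the Tx ring has been configured (TDLEN != 0).
         	 */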
   4839 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4840 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4841 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4842 		return;
   4843 
   4844 	/* TX */
   4845 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4846 	    preg, reg);
   4847 	reg = CSR_READ(sc, WMREG_TCTL);
   4848 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4849 
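         	/*
         	 * Queue a single dummy 512-byte descriptor and advance the tail
         	 * pointer so the hardware fetches it, flushing the internal Tx
         	 * descriptor cache.
         	 */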
   4850 	txq = &sc->sc_queue[0].wmq_txq;
   4851 	nexttx = txq->txq_next;
   4852 	txd = &txq->txq_descs[nexttx];
   4853 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4854 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4855 	txd->wtx_fields.wtxu_status = 0;
   4856 	txd->wtx_fields.wtxu_options = 0;
   4857 	txd->wtx_fields.wtxu_vlan = 0;
   4858 
   4859 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4860 	    BUS_SPACE_BARRIER_WRITE);
   4861 
   4862 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4863 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4864 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4865 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4866 	delay(250);
   4867 
   4868 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4869 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4870 		return;
   4871 
   4872 	/* RX */
   4873 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4874 	rctl = CSR_READ(sc, WMREG_RCTL);
   4875 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4876 	CSR_WRITE_FLUSH(sc);
   4877 	delay(150);
   4878 
   4879 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4880 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4881 	reg &= 0xffffc000;
   4882 	/*
   4883 	 * Update thresholds: prefetch threshold to 31, host threshold
   4884 	 * to 1 and make sure the granularity is "descriptors" and not
   4885 	 * "cache lines"
   4886 	 */
   4887 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4888 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4889 
   4890 	/* Momentarily enable the RX ring for the changes to take effect */
   4891 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4892 	CSR_WRITE_FLUSH(sc);
   4893 	delay(150);
   4894 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4895 }
   4896 
   4897 /*
   4898  * wm_reset:
   4899  *
   4900  *	Reset the i82542 chip.
   4901  */
   4902 static void
   4903 wm_reset(struct wm_softc *sc)
   4904 {
   4905 	int phy_reset = 0;
   4906 	int i, error = 0;
   4907 	uint32_t reg;
   4908 	uint16_t kmreg;
   4909 	int rv;
   4910 
   4911 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4912 		device_xname(sc->sc_dev), __func__));
   4913 	KASSERT(sc->sc_type != 0);
   4914 
   4915 	/*
   4916 	 * Allocate on-chip memory according to the MTU size.
   4917 	 * The Packet Buffer Allocation register must be written
   4918 	 * before the chip is reset.
   4919 	 */
   4920 	switch (sc->sc_type) {
   4921 	case WM_T_82547:
   4922 	case WM_T_82547_2:
   4923 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4924 		    PBA_22K : PBA_30K;
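         		/*
         		 * The 40KB packet buffer is split at sc_pba: the low
         		 * part is for Rx and the remainder is the Tx FIFO,
         		 * which software tracks for the Tx stall workaround.
         		 */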
   4925 		for (i = 0; i < sc->sc_nqueues; i++) {
   4926 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4927 			txq->txq_fifo_head = 0;
   4928 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4929 			txq->txq_fifo_size =
   4930 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4931 			txq->txq_fifo_stall = 0;
   4932 		}
   4933 		break;
   4934 	case WM_T_82571:
   4935 	case WM_T_82572:
   4936 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4937 	case WM_T_80003:
   4938 		sc->sc_pba = PBA_32K;
   4939 		break;
   4940 	case WM_T_82573:
   4941 		sc->sc_pba = PBA_12K;
   4942 		break;
   4943 	case WM_T_82574:
   4944 	case WM_T_82583:
   4945 		sc->sc_pba = PBA_20K;
   4946 		break;
   4947 	case WM_T_82576:
   4948 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4949 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4950 		break;
   4951 	case WM_T_82580:
   4952 	case WM_T_I350:
   4953 	case WM_T_I354:
   4954 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4955 		break;
   4956 	case WM_T_I210:
   4957 	case WM_T_I211:
   4958 		sc->sc_pba = PBA_34K;
   4959 		break;
   4960 	case WM_T_ICH8:
   4961 		/* Workaround for a bit corruption issue in FIFO memory */
   4962 		sc->sc_pba = PBA_8K;
   4963 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4964 		break;
   4965 	case WM_T_ICH9:
   4966 	case WM_T_ICH10:
   4967 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4968 		    PBA_14K : PBA_10K;
   4969 		break;
   4970 	case WM_T_PCH:
   4971 	case WM_T_PCH2:	/* XXX 14K? */
   4972 	case WM_T_PCH_LPT:
   4973 	case WM_T_PCH_SPT:
   4974 	case WM_T_PCH_CNP:
   4975 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   4976 		    PBA_12K : PBA_26K;
   4977 		break;
   4978 	default:
   4979 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4980 		    PBA_40K : PBA_48K;
   4981 		break;
   4982 	}
   4983 	/*
   4984 	 * Only old or non-multiqueue devices have the PBA register
   4985 	 * XXX Need special handling for 82575.
   4986 	 */
   4987 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4988 	    || (sc->sc_type == WM_T_82575))
   4989 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4990 
   4991 	/* Prevent the PCI-E bus from sticking */
   4992 	if (sc->sc_flags & WM_F_PCIE) {
   4993 		int timeout = 800;
   4994 
   4995 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4996 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4997 
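         		/* Poll up to 80ms (800 * 100us) for busmastering to stop. */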
   4998 		while (timeout--) {
   4999 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5000 			    == 0)
   5001 				break;
   5002 			delay(100);
   5003 		}
   5004 		if (timeout == 0)
   5005 			device_printf(sc->sc_dev,
   5006 			    "failed to disable busmastering\n");
   5007 	}
   5008 
   5009 	/* Set the completion timeout for interface */
   5010 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5011 	    || (sc->sc_type == WM_T_82580)
   5012 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5013 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5014 		wm_set_pcie_completion_timeout(sc);
   5015 
   5016 	/* Clear interrupt */
   5017 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5018 	if (wm_is_using_msix(sc)) {
   5019 		if (sc->sc_type != WM_T_82574) {
   5020 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5021 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5022 		} else
   5023 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5024 	}
   5025 
   5026 	/* Stop the transmit and receive processes. */
   5027 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5028 	sc->sc_rctl &= ~RCTL_EN;
   5029 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5030 	CSR_WRITE_FLUSH(sc);
   5031 
   5032 	/* XXX set_tbi_sbp_82543() */
   5033 
   5034 	delay(10*1000);
   5035 
   5036 	/* Must acquire the MDIO ownership before MAC reset */
   5037 	switch (sc->sc_type) {
   5038 	case WM_T_82573:
   5039 	case WM_T_82574:
   5040 	case WM_T_82583:
   5041 		error = wm_get_hw_semaphore_82573(sc);
   5042 		break;
   5043 	default:
   5044 		break;
   5045 	}
   5046 
   5047 	/*
   5048 	 * 82541 Errata 29? & 82547 Errata 28?
   5049 	 * See also the description about PHY_RST bit in CTRL register
   5050 	 * in 8254x_GBe_SDM.pdf.
   5051 	 */
   5052 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5053 		CSR_WRITE(sc, WMREG_CTRL,
   5054 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5055 		CSR_WRITE_FLUSH(sc);
   5056 		delay(5000);
   5057 	}
   5058 
   5059 	switch (sc->sc_type) {
   5060 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5061 	case WM_T_82541:
   5062 	case WM_T_82541_2:
   5063 	case WM_T_82547:
   5064 	case WM_T_82547_2:
   5065 		/*
   5066 		 * On some chipsets, a reset through a memory-mapped write
   5067 		 * cycle can cause the chip to reset before completing the
    5068 		 * write cycle. This causes major headaches that can be avoided
   5069 		 * by issuing the reset via indirect register writes through
   5070 		 * I/O space.
   5071 		 *
   5072 		 * So, if we successfully mapped the I/O BAR at attach time,
   5073 		 * use that. Otherwise, try our luck with a memory-mapped
   5074 		 * reset.
   5075 		 */
   5076 		if (sc->sc_flags & WM_F_IOH_VALID)
   5077 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5078 		else
   5079 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5080 		break;
   5081 	case WM_T_82545_3:
   5082 	case WM_T_82546_3:
   5083 		/* Use the shadow control register on these chips. */
   5084 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5085 		break;
   5086 	case WM_T_80003:
   5087 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5088 		sc->phy.acquire(sc);
   5089 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5090 		sc->phy.release(sc);
   5091 		break;
   5092 	case WM_T_ICH8:
   5093 	case WM_T_ICH9:
   5094 	case WM_T_ICH10:
   5095 	case WM_T_PCH:
   5096 	case WM_T_PCH2:
   5097 	case WM_T_PCH_LPT:
   5098 	case WM_T_PCH_SPT:
   5099 	case WM_T_PCH_CNP:
   5100 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5101 		if (wm_phy_resetisblocked(sc) == false) {
   5102 			/*
   5103 			 * Gate automatic PHY configuration by hardware on
   5104 			 * non-managed 82579
   5105 			 */
   5106 			if ((sc->sc_type == WM_T_PCH2)
   5107 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5108 				== 0))
   5109 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5110 
   5111 			reg |= CTRL_PHY_RESET;
   5112 			phy_reset = 1;
   5113 		} else
   5114 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5115 		sc->phy.acquire(sc);
   5116 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5117 		/* Don't insert a completion barrier when reset */
   5118 		delay(20*1000);
   5119 		mutex_exit(sc->sc_ich_phymtx);
   5120 		break;
   5121 	case WM_T_82580:
   5122 	case WM_T_I350:
   5123 	case WM_T_I354:
   5124 	case WM_T_I210:
   5125 	case WM_T_I211:
   5126 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5127 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5128 			CSR_WRITE_FLUSH(sc);
   5129 		delay(5000);
   5130 		break;
   5131 	case WM_T_82542_2_0:
   5132 	case WM_T_82542_2_1:
   5133 	case WM_T_82543:
   5134 	case WM_T_82540:
   5135 	case WM_T_82545:
   5136 	case WM_T_82546:
   5137 	case WM_T_82571:
   5138 	case WM_T_82572:
   5139 	case WM_T_82573:
   5140 	case WM_T_82574:
   5141 	case WM_T_82575:
   5142 	case WM_T_82576:
   5143 	case WM_T_82583:
   5144 	default:
   5145 		/* Everything else can safely use the documented method. */
   5146 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5147 		break;
   5148 	}
   5149 
   5150 	/* Must release the MDIO ownership after MAC reset */
   5151 	switch (sc->sc_type) {
   5152 	case WM_T_82573:
   5153 	case WM_T_82574:
   5154 	case WM_T_82583:
   5155 		if (error == 0)
   5156 			wm_put_hw_semaphore_82573(sc);
   5157 		break;
   5158 	default:
   5159 		break;
   5160 	}
   5161 
   5162 	/* Set Phy Config Counter to 50msec */
   5163 	if (sc->sc_type == WM_T_PCH2) {
   5164 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5165 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5166 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5167 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5168 	}
   5169 
   5170 	if (phy_reset != 0)
   5171 		wm_get_cfg_done(sc);
   5172 
   5173 	/* Reload EEPROM */
   5174 	switch (sc->sc_type) {
   5175 	case WM_T_82542_2_0:
   5176 	case WM_T_82542_2_1:
   5177 	case WM_T_82543:
   5178 	case WM_T_82544:
   5179 		delay(10);
   5180 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5181 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5182 		CSR_WRITE_FLUSH(sc);
   5183 		delay(2000);
   5184 		break;
   5185 	case WM_T_82540:
   5186 	case WM_T_82545:
   5187 	case WM_T_82545_3:
   5188 	case WM_T_82546:
   5189 	case WM_T_82546_3:
   5190 		delay(5*1000);
   5191 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5192 		break;
   5193 	case WM_T_82541:
   5194 	case WM_T_82541_2:
   5195 	case WM_T_82547:
   5196 	case WM_T_82547_2:
   5197 		delay(20000);
   5198 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5199 		break;
   5200 	case WM_T_82571:
   5201 	case WM_T_82572:
   5202 	case WM_T_82573:
   5203 	case WM_T_82574:
   5204 	case WM_T_82583:
   5205 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5206 			delay(10);
   5207 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5208 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5209 			CSR_WRITE_FLUSH(sc);
   5210 		}
   5211 		/* check EECD_EE_AUTORD */
   5212 		wm_get_auto_rd_done(sc);
   5213 		/*
   5214 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5215 		 * is set.
   5216 		 */
   5217 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5218 		    || (sc->sc_type == WM_T_82583))
   5219 			delay(25*1000);
   5220 		break;
   5221 	case WM_T_82575:
   5222 	case WM_T_82576:
   5223 	case WM_T_82580:
   5224 	case WM_T_I350:
   5225 	case WM_T_I354:
   5226 	case WM_T_I210:
   5227 	case WM_T_I211:
   5228 	case WM_T_80003:
   5229 		/* check EECD_EE_AUTORD */
   5230 		wm_get_auto_rd_done(sc);
   5231 		break;
   5232 	case WM_T_ICH8:
   5233 	case WM_T_ICH9:
   5234 	case WM_T_ICH10:
   5235 	case WM_T_PCH:
   5236 	case WM_T_PCH2:
   5237 	case WM_T_PCH_LPT:
   5238 	case WM_T_PCH_SPT:
   5239 	case WM_T_PCH_CNP:
   5240 		break;
   5241 	default:
   5242 		panic("%s: unknown type\n", __func__);
   5243 	}
   5244 
   5245 	/* Check whether EEPROM is present or not */
   5246 	switch (sc->sc_type) {
   5247 	case WM_T_82575:
   5248 	case WM_T_82576:
   5249 	case WM_T_82580:
   5250 	case WM_T_I350:
   5251 	case WM_T_I354:
   5252 	case WM_T_ICH8:
   5253 	case WM_T_ICH9:
   5254 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5255 			/* Not found */
   5256 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5257 			if (sc->sc_type == WM_T_82575)
   5258 				wm_reset_init_script_82575(sc);
   5259 		}
   5260 		break;
   5261 	default:
   5262 		break;
   5263 	}
   5264 
   5265 	if (phy_reset != 0)
   5266 		wm_phy_post_reset(sc);
   5267 
   5268 	if ((sc->sc_type == WM_T_82580)
   5269 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5270 		/* Clear global device reset status bit */
   5271 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5272 	}
   5273 
   5274 	/* Clear any pending interrupt events. */
   5275 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5276 	reg = CSR_READ(sc, WMREG_ICR);
   5277 	if (wm_is_using_msix(sc)) {
   5278 		if (sc->sc_type != WM_T_82574) {
   5279 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5280 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5281 		} else
   5282 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5283 	}
   5284 
   5285 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5286 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5287 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5288 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5289 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5290 		reg |= KABGTXD_BGSQLBIAS;
   5291 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5292 	}
   5293 
   5294 	/* Reload sc_ctrl */
   5295 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5296 
   5297 	wm_set_eee(sc);
   5298 
   5299 	/*
   5300 	 * For PCH, this write will make sure that any noise will be detected
   5301 	 * as a CRC error and be dropped rather than show up as a bad packet
   5302 	 * to the DMA engine
   5303 	 */
   5304 	if (sc->sc_type == WM_T_PCH)
   5305 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5306 
   5307 	if (sc->sc_type >= WM_T_82544)
   5308 		CSR_WRITE(sc, WMREG_WUC, 0);
   5309 
   5310 	if (sc->sc_type < WM_T_82575)
   5311 		wm_disable_aspm(sc); /* Workaround for some chips */
   5312 
   5313 	wm_reset_mdicnfg_82580(sc);
   5314 
   5315 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5316 		wm_pll_workaround_i210(sc);
   5317 
   5318 	if (sc->sc_type == WM_T_80003) {
   5319 		/* Default to TRUE to enable the MDIC W/A */
   5320 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5321 
   5322 		rv = wm_kmrn_readreg(sc,
   5323 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5324 		if (rv == 0) {
   5325 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5326 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5327 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5328 			else
   5329 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5330 		}
   5331 	}
   5332 }
   5333 
   5334 /*
   5335  * wm_add_rxbuf:
   5336  *
    5337  *	Add a receive buffer to the indicated descriptor.
   5338  */
   5339 static int
   5340 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5341 {
   5342 	struct wm_softc *sc = rxq->rxq_sc;
   5343 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5344 	struct mbuf *m;
   5345 	int error;
   5346 
   5347 	KASSERT(mutex_owned(rxq->rxq_lock));
   5348 
   5349 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5350 	if (m == NULL)
   5351 		return ENOBUFS;
   5352 
   5353 	MCLGET(m, M_DONTWAIT);
   5354 	if ((m->m_flags & M_EXT) == 0) {
   5355 		m_freem(m);
   5356 		return ENOBUFS;
   5357 	}
   5358 
   5359 	if (rxs->rxs_mbuf != NULL)
   5360 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5361 
   5362 	rxs->rxs_mbuf = m;
   5363 
   5364 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5365 	/*
   5366 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5367 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5368 	 */
   5369 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5370 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5371 	if (error) {
   5372 		/* XXX XXX XXX */
   5373 		aprint_error_dev(sc->sc_dev,
   5374 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5375 		panic("wm_add_rxbuf");
   5376 	}
   5377 
   5378 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5379 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5380 
   5381 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5382 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5383 			wm_init_rxdesc(rxq, idx);
   5384 	} else
   5385 		wm_init_rxdesc(rxq, idx);
   5386 
   5387 	return 0;
   5388 }
   5389 
   5390 /*
   5391  * wm_rxdrain:
   5392  *
   5393  *	Drain the receive queue.
   5394  */
   5395 static void
   5396 wm_rxdrain(struct wm_rxqueue *rxq)
   5397 {
   5398 	struct wm_softc *sc = rxq->rxq_sc;
   5399 	struct wm_rxsoft *rxs;
   5400 	int i;
   5401 
   5402 	KASSERT(mutex_owned(rxq->rxq_lock));
   5403 
   5404 	for (i = 0; i < WM_NRXDESC; i++) {
   5405 		rxs = &rxq->rxq_soft[i];
   5406 		if (rxs->rxs_mbuf != NULL) {
   5407 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5408 			m_freem(rxs->rxs_mbuf);
   5409 			rxs->rxs_mbuf = NULL;
   5410 		}
   5411 	}
   5412 }
   5413 
   5414 /*
   5415  * Setup registers for RSS.
   5416  *
    5417  * XXX VMDq is not supported yet.
   5418  */
   5419 static void
   5420 wm_init_rss(struct wm_softc *sc)
   5421 {
   5422 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5423 	int i;
   5424 
   5425 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5426 
   5427 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5428 		unsigned int qid, reta_ent;
   5429 
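         		/*
         		 * Spread the indirection-table entries round-robin
         		 * across the active queues.
         		 */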
   5430 		qid  = i % sc->sc_nqueues;
   5431 		switch (sc->sc_type) {
   5432 		case WM_T_82574:
   5433 			reta_ent = __SHIFTIN(qid,
   5434 			    RETA_ENT_QINDEX_MASK_82574);
   5435 			break;
   5436 		case WM_T_82575:
   5437 			reta_ent = __SHIFTIN(qid,
   5438 			    RETA_ENT_QINDEX1_MASK_82575);
   5439 			break;
   5440 		default:
   5441 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5442 			break;
   5443 		}
   5444 
   5445 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5446 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5447 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5448 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5449 	}
   5450 
   5451 	rss_getkey((uint8_t *)rss_key);
   5452 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5453 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5454 
   5455 	if (sc->sc_type == WM_T_82574)
   5456 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5457 	else
   5458 		mrqc = MRQC_ENABLE_RSS_MQ;
   5459 
   5460 	/*
   5461 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5462 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5463 	 */
   5464 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5465 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5466 #if 0
   5467 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5468 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5469 #endif
   5470 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5471 
   5472 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5473 }
   5474 
   5475 /*
    5476  * Adjust the TX and RX queue numbers which the system actually uses.
    5477  *
    5478  * The numbers are affected by the parameters below:
    5479  *     - The number of hardware queues
   5480  *     - The number of MSI-X vectors (= "nvectors" argument)
   5481  *     - ncpu
   5482  */
   5483 static void
   5484 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5485 {
   5486 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5487 
   5488 	if (nvectors < 2) {
   5489 		sc->sc_nqueues = 1;
   5490 		return;
   5491 	}
   5492 
   5493 	switch (sc->sc_type) {
   5494 	case WM_T_82572:
   5495 		hw_ntxqueues = 2;
   5496 		hw_nrxqueues = 2;
   5497 		break;
   5498 	case WM_T_82574:
   5499 		hw_ntxqueues = 2;
   5500 		hw_nrxqueues = 2;
   5501 		break;
   5502 	case WM_T_82575:
   5503 		hw_ntxqueues = 4;
   5504 		hw_nrxqueues = 4;
   5505 		break;
   5506 	case WM_T_82576:
   5507 		hw_ntxqueues = 16;
   5508 		hw_nrxqueues = 16;
   5509 		break;
   5510 	case WM_T_82580:
   5511 	case WM_T_I350:
   5512 	case WM_T_I354:
   5513 		hw_ntxqueues = 8;
   5514 		hw_nrxqueues = 8;
   5515 		break;
   5516 	case WM_T_I210:
   5517 		hw_ntxqueues = 4;
   5518 		hw_nrxqueues = 4;
   5519 		break;
   5520 	case WM_T_I211:
   5521 		hw_ntxqueues = 2;
   5522 		hw_nrxqueues = 2;
   5523 		break;
   5524 		/*
    5525 		 * As the ethernet controllers below do not support MSI-X,
    5526 		 * this driver does not use multiqueue on them.
   5527 		 *     - WM_T_80003
   5528 		 *     - WM_T_ICH8
   5529 		 *     - WM_T_ICH9
   5530 		 *     - WM_T_ICH10
   5531 		 *     - WM_T_PCH
   5532 		 *     - WM_T_PCH2
   5533 		 *     - WM_T_PCH_LPT
   5534 		 */
   5535 	default:
   5536 		hw_ntxqueues = 1;
   5537 		hw_nrxqueues = 1;
   5538 		break;
   5539 	}
   5540 
   5541 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5542 
   5543 	/*
    5544 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    5545 	 * limit the number of queues actually used.
   5546 	 */
   5547 	if (nvectors < hw_nqueues + 1)
   5548 		sc->sc_nqueues = nvectors - 1;
   5549 	else
   5550 		sc->sc_nqueues = hw_nqueues;
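         	/*
         	 * For example, an 82576 (16 hardware queues) given 5 MSI-X
         	 * vectors ends up with sc_nqueues = 4 (one vector is reserved
         	 * for the link interrupt).
         	 */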
   5551 
   5552 	/*
    5553 	 * Since more queues than CPUs cannot improve scaling, we limit
    5554 	 * the number of queues actually used.
   5555 	 */
   5556 	if (ncpu < sc->sc_nqueues)
   5557 		sc->sc_nqueues = ncpu;
   5558 }
   5559 
   5560 static inline bool
   5561 wm_is_using_msix(struct wm_softc *sc)
   5562 {
   5563 
   5564 	return (sc->sc_nintrs > 1);
   5565 }
   5566 
   5567 static inline bool
   5568 wm_is_using_multiqueue(struct wm_softc *sc)
   5569 {
   5570 
   5571 	return (sc->sc_nqueues > 1);
   5572 }
   5573 
   5574 static int
   5575 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5576 {
   5577 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5578 
   5579 	wmq->wmq_id = qidx;
   5580 	wmq->wmq_intr_idx = intr_idx;
   5581 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5582 	    wm_handle_queue, wmq);
   5583 	if (wmq->wmq_si != NULL)
   5584 		return 0;
   5585 
   5586 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5587 	    wmq->wmq_id);
   5588 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5589 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5590 	return ENOMEM;
   5591 }
   5592 
   5593 /*
   5594  * Both single interrupt MSI and INTx can use this function.
   5595  */
   5596 static int
   5597 wm_setup_legacy(struct wm_softc *sc)
   5598 {
   5599 	pci_chipset_tag_t pc = sc->sc_pc;
   5600 	const char *intrstr = NULL;
   5601 	char intrbuf[PCI_INTRSTR_LEN];
   5602 	int error;
   5603 
   5604 	error = wm_alloc_txrx_queues(sc);
   5605 	if (error) {
   5606 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5607 		    error);
   5608 		return ENOMEM;
   5609 	}
   5610 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5611 	    sizeof(intrbuf));
   5612 #ifdef WM_MPSAFE
   5613 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5614 #endif
   5615 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5616 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5617 	if (sc->sc_ihs[0] == NULL) {
   5618 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5619 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5620 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5621 		return ENOMEM;
   5622 	}
   5623 
   5624 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5625 	sc->sc_nintrs = 1;
   5626 
   5627 	return wm_softint_establish_queue(sc, 0, 0);
   5628 }
   5629 
   5630 static int
   5631 wm_setup_msix(struct wm_softc *sc)
   5632 {
   5633 	void *vih;
   5634 	kcpuset_t *affinity;
   5635 	int qidx, error, intr_idx, txrx_established;
   5636 	pci_chipset_tag_t pc = sc->sc_pc;
   5637 	const char *intrstr = NULL;
   5638 	char intrbuf[PCI_INTRSTR_LEN];
   5639 	char intr_xname[INTRDEVNAMEBUF];
   5640 
   5641 	if (sc->sc_nqueues < ncpu) {
   5642 		/*
    5643 		 * To avoid contention with other devices' interrupts, the
    5644 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   5645 		 */
   5646 		sc->sc_affinity_offset = 1;
   5647 	} else {
   5648 		/*
    5649 		 * In this case, this device uses all CPUs, so we match
    5650 		 * the affinitized cpu_index to the MSI-X vector number
         		 * for readability.
   5651 		 */
   5652 		sc->sc_affinity_offset = 0;
   5653 	}
   5654 
   5655 	error = wm_alloc_txrx_queues(sc);
   5656 	if (error) {
   5657 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5658 		    error);
   5659 		return ENOMEM;
   5660 	}
   5661 
   5662 	kcpuset_create(&affinity, false);
   5663 	intr_idx = 0;
   5664 
   5665 	/*
   5666 	 * TX and RX
   5667 	 */
   5668 	txrx_established = 0;
   5669 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5670 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5671 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
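         		/*
         		 * For example, with sc_affinity_offset == 1, queue 0
         		 * goes to CPU#1, queue 1 to CPU#2, and so on, wrapping
         		 * around at ncpu.
         		 */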
   5672 
   5673 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5674 		    sizeof(intrbuf));
   5675 #ifdef WM_MPSAFE
   5676 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5677 		    PCI_INTR_MPSAFE, true);
   5678 #endif
   5679 		memset(intr_xname, 0, sizeof(intr_xname));
   5680 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5681 		    device_xname(sc->sc_dev), qidx);
   5682 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5683 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5684 		if (vih == NULL) {
   5685 			aprint_error_dev(sc->sc_dev,
   5686 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5687 			    intrstr ? " at " : "",
   5688 			    intrstr ? intrstr : "");
   5689 
   5690 			goto fail;
   5691 		}
   5692 		kcpuset_zero(affinity);
   5693 		/* Round-robin affinity */
   5694 		kcpuset_set(affinity, affinity_to);
   5695 		error = interrupt_distribute(vih, affinity, NULL);
   5696 		if (error == 0) {
   5697 			aprint_normal_dev(sc->sc_dev,
   5698 			    "for TX and RX interrupting at %s affinity to %u\n",
   5699 			    intrstr, affinity_to);
   5700 		} else {
   5701 			aprint_normal_dev(sc->sc_dev,
   5702 			    "for TX and RX interrupting at %s\n", intrstr);
   5703 		}
   5704 		sc->sc_ihs[intr_idx] = vih;
   5705 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5706 			goto fail;
   5707 		txrx_established++;
   5708 		intr_idx++;
   5709 	}
   5710 
   5711 	/* LINK */
   5712 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5713 	    sizeof(intrbuf));
   5714 #ifdef WM_MPSAFE
   5715 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5716 #endif
   5717 	memset(intr_xname, 0, sizeof(intr_xname));
   5718 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5719 	    device_xname(sc->sc_dev));
   5720 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5721 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5722 	if (vih == NULL) {
   5723 		aprint_error_dev(sc->sc_dev,
   5724 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5725 		    intrstr ? " at " : "",
   5726 		    intrstr ? intrstr : "");
   5727 
   5728 		goto fail;
   5729 	}
   5730 	/* Keep default affinity to LINK interrupt */
   5731 	aprint_normal_dev(sc->sc_dev,
   5732 	    "for LINK interrupting at %s\n", intrstr);
   5733 	sc->sc_ihs[intr_idx] = vih;
   5734 	sc->sc_link_intr_idx = intr_idx;
   5735 
   5736 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5737 	kcpuset_destroy(affinity);
   5738 	return 0;
   5739 
   5740  fail:
   5741 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5742 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5743 		pci_intr_disestablish(sc->sc_pc,
         		    sc->sc_ihs[wmq->wmq_intr_idx]);
   5744 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5745 	}
   5746 
   5747 	kcpuset_destroy(affinity);
   5748 	return ENOMEM;
   5749 }
   5750 
   5751 static void
   5752 wm_unset_stopping_flags(struct wm_softc *sc)
   5753 {
   5754 	int i;
   5755 
   5756 	KASSERT(WM_CORE_LOCKED(sc));
   5757 
   5758 	/* Must unset stopping flags in ascending order. */
   5759 	for (i = 0; i < sc->sc_nqueues; i++) {
   5760 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5761 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5762 
   5763 		mutex_enter(txq->txq_lock);
   5764 		txq->txq_stopping = false;
   5765 		mutex_exit(txq->txq_lock);
   5766 
   5767 		mutex_enter(rxq->rxq_lock);
   5768 		rxq->rxq_stopping = false;
   5769 		mutex_exit(rxq->rxq_lock);
   5770 	}
   5771 
   5772 	sc->sc_core_stopping = false;
   5773 }
   5774 
   5775 static void
   5776 wm_set_stopping_flags(struct wm_softc *sc)
   5777 {
   5778 	int i;
   5779 
   5780 	KASSERT(WM_CORE_LOCKED(sc));
   5781 
   5782 	sc->sc_core_stopping = true;
   5783 
   5784 	/* Must set stopping flags in ascending order. */
   5785 	for (i = 0; i < sc->sc_nqueues; i++) {
   5786 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5787 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5788 
   5789 		mutex_enter(rxq->rxq_lock);
   5790 		rxq->rxq_stopping = true;
   5791 		mutex_exit(rxq->rxq_lock);
   5792 
   5793 		mutex_enter(txq->txq_lock);
   5794 		txq->txq_stopping = true;
   5795 		mutex_exit(txq->txq_lock);
   5796 	}
   5797 }
   5798 
   5799 /*
   5800  * Write interrupt interval value to ITR or EITR
   5801  */
   5802 static void
   5803 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5804 {
   5805 
   5806 	if (!wmq->wmq_set_itr)
   5807 		return;
   5808 
   5809 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5810 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5811 
   5812 		/*
    5813 		 * The 82575 doesn't have the CNT_INGR field, so
    5814 		 * overwrite the counter field by software.
   5815 		 */
   5816 		if (sc->sc_type == WM_T_82575)
   5817 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5818 		else
   5819 			eitr |= EITR_CNT_INGR;
   5820 
   5821 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5822 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5823 		/*
    5824 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5825 		 * the multiqueue function with MSI-X.
   5826 		 */
   5827 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5828 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5829 	} else {
   5830 		KASSERT(wmq->wmq_id == 0);
   5831 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5832 	}
   5833 
   5834 	wmq->wmq_set_itr = false;
   5835 }
   5836 
   5837 /*
   5838  * TODO
    5839  * The dynamic ITR calculation below is almost the same as in Linux igb;
    5840  * however, it does not fit wm(4) well, so AIM is disabled until we find
    5841  * an appropriate ITR calculation.
   5842  */
   5843 /*
    5844  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5845  * write. This function itself does not write the ITR/EITR register.
   5846  */
   5847 static void
   5848 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5849 {
   5850 #ifdef NOTYET
   5851 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5852 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5853 	uint32_t avg_size = 0;
   5854 	uint32_t new_itr;
   5855 
   5856 	if (rxq->rxq_packets)
   5857 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5858 	if (txq->txq_packets)
   5859 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5860 
   5861 	if (avg_size == 0) {
   5862 		new_itr = 450; /* restore default value */
   5863 		goto out;
   5864 	}
   5865 
   5866 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5867 	avg_size += 24;
   5868 
   5869 	/* Don't starve jumbo frames */
   5870 	avg_size = uimin(avg_size, 3000);
   5871 
   5872 	/* Give a little boost to mid-size frames */
   5873 	if ((avg_size > 300) && (avg_size < 1200))
   5874 		new_itr = avg_size / 3;
   5875 	else
   5876 		new_itr = avg_size / 2;
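         	/* For example, avg_size 600 yields 200; avg_size 1500 yields 750. */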
   5877 
   5878 out:
   5879 	/*
    5880 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5881 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5882 	 */
   5883 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5884 		new_itr *= 4;
   5885 
   5886 	if (new_itr != wmq->wmq_itr) {
   5887 		wmq->wmq_itr = new_itr;
   5888 		wmq->wmq_set_itr = true;
   5889 	} else
   5890 		wmq->wmq_set_itr = false;
   5891 
   5892 	rxq->rxq_packets = 0;
   5893 	rxq->rxq_bytes = 0;
   5894 	txq->txq_packets = 0;
   5895 	txq->txq_bytes = 0;
   5896 #endif
   5897 }
   5898 
   5899 static void
   5900 wm_init_sysctls(struct wm_softc *sc)
   5901 {
   5902 	struct sysctllog **log;
   5903 	const struct sysctlnode *rnode, *qnode, *cnode;
   5904 	int i, rv;
   5905 	const char *dvname;
   5906 
   5907 	log = &sc->sc_sysctllog;
   5908 	dvname = device_xname(sc->sc_dev);
   5909 
   5910 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5911 	    0, CTLTYPE_NODE, dvname,
   5912 	    SYSCTL_DESCR("wm information and settings"),
   5913 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5914 	if (rv != 0)
   5915 		goto err;
   5916 
   5917 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
    5918 	    CTLTYPE_BOOL, "txrx_workqueue",
         	    SYSCTL_DESCR("Use workqueue for packet processing"),
   5919 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5920 	if (rv != 0)
   5921 		goto teardown;
   5922 
   5923 	for (i = 0; i < sc->sc_nqueues; i++) {
   5924 		struct wm_queue *wmq = &sc->sc_queue[i];
   5925 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5926 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5927 
   5928 		snprintf(sc->sc_queue[i].sysctlname,
   5929 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5930 
   5931 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5932 		    0, CTLTYPE_NODE,
   5933 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5934 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5935 			break;
   5936 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5937 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5938 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   5939 		    NULL, 0, &txq->txq_free,
   5940 		    0, CTL_CREATE, CTL_EOL) != 0)
   5941 			break;
   5942 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5943 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5944 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   5945 		    NULL, 0, &txq->txq_next,
   5946 		    0, CTL_CREATE, CTL_EOL) != 0)
   5947 			break;
   5948 
   5949 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5950 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5951 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   5952 		    NULL, 0, &rxq->rxq_ptr,
   5953 		    0, CTL_CREATE, CTL_EOL) != 0)
   5954 			break;
   5955 	}
   5956 
   5957 #ifdef WM_DEBUG
   5958 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5959 	    CTLTYPE_INT, "debug_flags",
   5960 	    SYSCTL_DESCR(
   5961 		    "Debug flags:\n"	\
   5962 		    "\t0x01 LINK\n"	\
   5963 		    "\t0x02 TX\n"	\
   5964 		    "\t0x04 RX\n"	\
   5965 		    "\t0x08 GMII\n"	\
   5966 		    "\t0x10 MANAGE\n"	\
   5967 		    "\t0x20 NVM\n"	\
   5968 		    "\t0x40 INIT\n"	\
   5969 		    "\t0x80 LOCK"),
   5970 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   5971 	if (rv != 0)
   5972 		goto teardown;
   5973 #endif
   5974 
   5975 	return;
   5976 
   5977 teardown:
   5978 	sysctl_teardown(log);
   5979 err:
   5980 	sc->sc_sysctllog = NULL;
   5981 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5982 	    __func__, rv);
   5983 }
   5984 
   5985 /*
   5986  * wm_init:		[ifnet interface function]
   5987  *
   5988  *	Initialize the interface.
   5989  */
   5990 static int
   5991 wm_init(struct ifnet *ifp)
   5992 {
   5993 	struct wm_softc *sc = ifp->if_softc;
   5994 	int ret;
   5995 
   5996 	WM_CORE_LOCK(sc);
   5997 	ret = wm_init_locked(ifp);
   5998 	WM_CORE_UNLOCK(sc);
   5999 
   6000 	return ret;
   6001 }
   6002 
   6003 static int
   6004 wm_init_locked(struct ifnet *ifp)
   6005 {
   6006 	struct wm_softc *sc = ifp->if_softc;
   6007 	struct ethercom *ec = &sc->sc_ethercom;
   6008 	int i, j, trynum, error = 0;
   6009 	uint32_t reg, sfp_mask = 0;
   6010 
   6011 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6012 		device_xname(sc->sc_dev), __func__));
   6013 	KASSERT(WM_CORE_LOCKED(sc));
   6014 
   6015 	/*
    6016 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6017 	 * There is a small but measurable benefit to avoiding the adjustment
   6018 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6019 	 * on such platforms.  One possibility is that the DMA itself is
   6020 	 * slightly more efficient if the front of the entire packet (instead
   6021 	 * of the front of the headers) is aligned.
   6022 	 *
   6023 	 * Note we must always set align_tweak to 0 if we are using
   6024 	 * jumbo frames.
   6025 	 */
   6026 #ifdef __NO_STRICT_ALIGNMENT
   6027 	sc->sc_align_tweak = 0;
   6028 #else
   6029 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6030 		sc->sc_align_tweak = 0;
   6031 	else
   6032 		sc->sc_align_tweak = 2;
   6033 #endif /* __NO_STRICT_ALIGNMENT */
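         	/*
         	 * With the 2-byte tweak, the 14-byte Ethernet header starts at
         	 * offset 2, so the IP header lands on a 4-byte boundary.
         	 */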
   6034 
   6035 	/* Cancel any pending I/O. */
   6036 	wm_stop_locked(ifp, false, false);
   6037 
   6038 	/* Update statistics before reset */
   6039 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6040 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6041 
   6042 	/* PCH_SPT hardware workaround */
   6043 	if (sc->sc_type == WM_T_PCH_SPT)
   6044 		wm_flush_desc_rings(sc);
   6045 
   6046 	/* Reset the chip to a known state. */
   6047 	wm_reset(sc);
   6048 
   6049 	/*
    6050 	 * AMT-based hardware can now take control from firmware.
    6051 	 * Do this after reset.
   6052 	 */
   6053 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6054 		wm_get_hw_control(sc);
   6055 
   6056 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6057 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6058 		wm_legacy_irq_quirk_spt(sc);
   6059 
   6060 	/* Init hardware bits */
   6061 	wm_initialize_hardware_bits(sc);
   6062 
   6063 	/* Reset the PHY. */
   6064 	if (sc->sc_flags & WM_F_HAS_MII)
   6065 		wm_gmii_reset(sc);
   6066 
   6067 	if (sc->sc_type >= WM_T_ICH8) {
   6068 		reg = CSR_READ(sc, WMREG_GCR);
   6069 		/*
   6070 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6071 		 * default after reset.
   6072 		 */
   6073 		if (sc->sc_type == WM_T_ICH8)
   6074 			reg |= GCR_NO_SNOOP_ALL;
   6075 		else
   6076 			reg &= ~GCR_NO_SNOOP_ALL;
   6077 		CSR_WRITE(sc, WMREG_GCR, reg);
   6078 	}
   6079 
   6080 	if ((sc->sc_type >= WM_T_ICH8)
   6081 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6082 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6083 
   6084 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6085 		reg |= CTRL_EXT_RO_DIS;
   6086 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6087 	}
   6088 
   6089 	/* Calculate (E)ITR value */
   6090 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6091 		/*
    6092 		 * For NEWQUEUE's EITR (except for the 82575).
    6093 		 * 82575's EITR should be set to the same throttling value
    6094 		 * as other old controllers' ITR because the interrupt/sec
    6095 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6096 		 *
    6097 		 * 82574's EITR should be set to the same throttling value
    6098 		 * as the ITR.
    6099 		 *
    6100 		 * For N interrupts/sec, set this value to 1,000,000 / N,
         		 * in contrast to the ITR throttling value.
   6101 		 */
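         		/* 450 corresponds to 1,000,000 / 450 = ~2222 interrupts/sec. */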
   6102 		sc->sc_itr_init = 450;
   6103 	} else if (sc->sc_type >= WM_T_82543) {
   6104 		/*
    6105 		 * Set up the interrupt throttling register (units of 256ns).
   6106 		 * Note that a footnote in Intel's documentation says this
   6107 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6108 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6109 		 * that that is also true for the 1024ns units of the other
   6110 		 * interrupt-related timer registers -- so, really, we ought
   6111 		 * to divide this value by 4 when the link speed is low.
   6112 		 *
   6113 		 * XXX implement this division at link speed change!
   6114 		 */
   6115 
   6116 		/*
   6117 		 * For N interrupts/sec, set this value to:
   6118 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6119 		 * absolute and packet timer values to this value
   6120 		 * divided by 4 to get "simple timer" behavior.
   6121 		 */
   6122 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6123 	}
   6124 
   6125 	error = wm_init_txrx_queues(sc);
   6126 	if (error)
   6127 		goto out;
   6128 
   6129 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6130 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6131 	    (sc->sc_type >= WM_T_82575))
   6132 		wm_serdes_power_up_link_82575(sc);
   6133 
   6134 	/* Clear out the VLAN table -- we don't use it (yet). */
   6135 	CSR_WRITE(sc, WMREG_VET, 0);
   6136 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6137 		trynum = 10; /* Due to hw errata */
   6138 	else
   6139 		trynum = 1;
   6140 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6141 		for (j = 0; j < trynum; j++)
   6142 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6143 
   6144 	/*
   6145 	 * Set up flow-control parameters.
   6146 	 *
   6147 	 * XXX Values could probably stand some tuning.
   6148 	 */
   6149 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6150 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6151 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6152 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6153 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6154 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6155 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6156 	}
   6157 
   6158 	sc->sc_fcrtl = FCRTL_DFLT;
   6159 	if (sc->sc_type < WM_T_82543) {
   6160 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6161 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6162 	} else {
   6163 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6164 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6165 	}
   6166 
   6167 	if (sc->sc_type == WM_T_80003)
   6168 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6169 	else
   6170 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6171 
   6172 	/* Writes the control register. */
   6173 	wm_set_vlan(sc);
   6174 
   6175 	if (sc->sc_flags & WM_F_HAS_MII) {
   6176 		uint16_t kmreg;
   6177 
   6178 		switch (sc->sc_type) {
   6179 		case WM_T_80003:
   6180 		case WM_T_ICH8:
   6181 		case WM_T_ICH9:
   6182 		case WM_T_ICH10:
   6183 		case WM_T_PCH:
   6184 		case WM_T_PCH2:
   6185 		case WM_T_PCH_LPT:
   6186 		case WM_T_PCH_SPT:
   6187 		case WM_T_PCH_CNP:
   6188 			/*
   6189 			 * Set the MAC to wait the maximum time between each
   6190 			 * iteration and increase the max iterations when
   6191 			 * polling the PHY; this fixes erroneous timeouts at
   6192 			 * 10Mbps.
   6193 			 */
   6194 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6195 			    0xFFFF);
   6196 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6197 			    &kmreg);
   6198 			kmreg |= 0x3F;
   6199 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6200 			    kmreg);
   6201 			break;
   6202 		default:
   6203 			break;
   6204 		}
   6205 
   6206 		if (sc->sc_type == WM_T_80003) {
   6207 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6208 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6209 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6210 
   6211 			/* Bypass the RX and TX FIFOs */
   6212 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6213 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6214 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6215 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6216 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6217 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6218 		}
   6219 	}
   6220 #if 0
   6221 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6222 #endif
   6223 
   6224 	/* Set up checksum offload parameters. */
   6225 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6226 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6227 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6228 		reg |= RXCSUM_IPOFL;
   6229 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6230 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6231 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6232 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6233 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6234 
   6235 	/* Set up MSI-X related registers */
   6236 	if (wm_is_using_msix(sc)) {
   6237 		uint32_t ivar, qintr_idx;
   6238 		struct wm_queue *wmq;
   6239 		unsigned int qid;
   6240 
   6241 		if (sc->sc_type == WM_T_82575) {
   6242 			/* Interrupt control */
   6243 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6244 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6245 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6246 
   6247 			/* TX and RX */
   6248 			for (i = 0; i < sc->sc_nqueues; i++) {
   6249 				wmq = &sc->sc_queue[i];
   6250 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6251 				    EITR_TX_QUEUE(wmq->wmq_id)
   6252 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6253 			}
   6254 			/* Link status */
   6255 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6256 			    EITR_OTHER);
   6257 		} else if (sc->sc_type == WM_T_82574) {
   6258 			/* Interrupt control */
   6259 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6260 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6261 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6262 
   6263 			/*
   6264 			 * Work around an issue with spurious interrupts in
   6265 			 * MSI-X mode. At wm_initialize_hardware_bits(),
   6266 			 * sc_nintrs has not been initialized yet, so
   6267 			 * re-initialize WMREG_RFCTL here.
   6268 			 */
   6269 			reg = CSR_READ(sc, WMREG_RFCTL);
   6270 			reg |= WMREG_RFCTL_ACKDIS;
   6271 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6272 
   6273 			ivar = 0;
   6274 			/* TX and RX */
   6275 			for (i = 0; i < sc->sc_nqueues; i++) {
   6276 				wmq = &sc->sc_queue[i];
   6277 				qid = wmq->wmq_id;
   6278 				qintr_idx = wmq->wmq_intr_idx;
   6279 
   6280 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6281 				    IVAR_TX_MASK_Q_82574(qid));
   6282 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6283 				    IVAR_RX_MASK_Q_82574(qid));
   6284 			}
   6285 			/* Link status */
   6286 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6287 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6288 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6289 		} else {
   6290 			/* Interrupt control */
   6291 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6292 			    | GPIE_EIAME | GPIE_PBA);
   6293 
   6294 			switch (sc->sc_type) {
   6295 			case WM_T_82580:
   6296 			case WM_T_I350:
   6297 			case WM_T_I354:
   6298 			case WM_T_I210:
   6299 			case WM_T_I211:
   6300 				/* TX and RX */
   6301 				for (i = 0; i < sc->sc_nqueues; i++) {
   6302 					wmq = &sc->sc_queue[i];
   6303 					qid = wmq->wmq_id;
   6304 					qintr_idx = wmq->wmq_intr_idx;
   6305 
   6306 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6307 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6308 					ivar |= __SHIFTIN((qintr_idx
   6309 						| IVAR_VALID),
   6310 					    IVAR_TX_MASK_Q(qid));
   6311 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6312 					ivar |= __SHIFTIN((qintr_idx
   6313 						| IVAR_VALID),
   6314 					    IVAR_RX_MASK_Q(qid));
   6315 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6316 				}
   6317 				break;
   6318 			case WM_T_82576:
   6319 				/* TX and RX */
   6320 				for (i = 0; i < sc->sc_nqueues; i++) {
   6321 					wmq = &sc->sc_queue[i];
   6322 					qid = wmq->wmq_id;
   6323 					qintr_idx = wmq->wmq_intr_idx;
   6324 
   6325 					ivar = CSR_READ(sc,
   6326 					    WMREG_IVAR_Q_82576(qid));
   6327 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6328 					ivar |= __SHIFTIN((qintr_idx
   6329 						| IVAR_VALID),
   6330 					    IVAR_TX_MASK_Q_82576(qid));
   6331 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6332 					ivar |= __SHIFTIN((qintr_idx
   6333 						| IVAR_VALID),
   6334 					    IVAR_RX_MASK_Q_82576(qid));
   6335 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6336 					    ivar);
   6337 				}
   6338 				break;
   6339 			default:
   6340 				break;
   6341 			}
   6342 
   6343 			/* Link status */
   6344 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6345 			    IVAR_MISC_OTHER);
   6346 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6347 		}
   6348 
   6349 		if (wm_is_using_multiqueue(sc)) {
   6350 			wm_init_rss(sc);
   6351 
   6352 			/*
   6353 			 * NOTE: Receive Full-Packet Checksum Offload is
   6354 			 * mutually exclusive with Multiqueue.  However,
   6355 			 * this is not the same as the TCP/IP checksums,
   6356 			 * which still work.
   6357 			 */
   6358 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6359 			reg |= RXCSUM_PCSD;
   6360 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6361 		}
   6362 	}
   6363 
   6364 	/* Set up the interrupt registers. */
   6365 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6366 
   6367 	/* Enable SFP module insertion interrupt if it's required */
   6368 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6369 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6370 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6371 		sfp_mask = ICR_GPI(0);
   6372 	}
   6373 
   6374 	if (wm_is_using_msix(sc)) {
   6375 		uint32_t mask;
   6376 		struct wm_queue *wmq;
   6377 
   6378 		switch (sc->sc_type) {
   6379 		case WM_T_82574:
   6380 			mask = 0;
   6381 			for (i = 0; i < sc->sc_nqueues; i++) {
   6382 				wmq = &sc->sc_queue[i];
   6383 				mask |= ICR_TXQ(wmq->wmq_id);
   6384 				mask |= ICR_RXQ(wmq->wmq_id);
   6385 			}
   6386 			mask |= ICR_OTHER;
   6387 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6388 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6389 			break;
   6390 		default:
   6391 			if (sc->sc_type == WM_T_82575) {
   6392 				mask = 0;
   6393 				for (i = 0; i < sc->sc_nqueues; i++) {
   6394 					wmq = &sc->sc_queue[i];
   6395 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6396 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6397 				}
   6398 				mask |= EITR_OTHER;
   6399 			} else {
   6400 				mask = 0;
   6401 				for (i = 0; i < sc->sc_nqueues; i++) {
   6402 					wmq = &sc->sc_queue[i];
   6403 					mask |= 1 << wmq->wmq_intr_idx;
   6404 				}
   6405 				mask |= 1 << sc->sc_link_intr_idx;
   6406 			}
   6407 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6408 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6409 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6410 
   6411 			/* For other interrupts */
   6412 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6413 			break;
   6414 		}
   6415 	} else {
   6416 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6417 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6418 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6419 	}
   6420 
   6421 	/* Set up the inter-packet gap. */
   6422 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6423 
   6424 	if (sc->sc_type >= WM_T_82543) {
   6425 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6426 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6427 			wm_itrs_writereg(sc, wmq);
   6428 		}
   6429 		/*
   6430 		 * Link interrupts occur much less frequently than
   6431 		 * TX and RX interrupts, so we don't tune the
   6432 		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's
   6433 		 * if_igb does.
   6434 		 */
   6435 	}
   6436 
   6437 	/* Set the VLAN ethernetype. */
   6438 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6439 
   6440 	/*
   6441 	 * Set up the transmit control register; we start out with
   6442 	 * a collision distance suitable for FDX, but update it when
   6443 	 * we resolve the media type.
   6444 	 */
   6445 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6446 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6447 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6448 	if (sc->sc_type >= WM_T_82571)
   6449 		sc->sc_tctl |= TCTL_MULR;
   6450 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6451 
   6452 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6453 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   6454 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6455 	}
   6456 
   6457 	if (sc->sc_type == WM_T_80003) {
   6458 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6459 		reg &= ~TCTL_EXT_GCEX_MASK;
   6460 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6461 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6462 	}
   6463 
   6464 	/* Set the media. */
   6465 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6466 		goto out;
   6467 
   6468 	/* Configure for OS presence */
   6469 	wm_init_manageability(sc);
   6470 
   6471 	/*
   6472 	 * Set up the receive control register; we actually program the
   6473 	 * register when we set the receive filter. Use multicast address
   6474 	 * offset type 0.
   6475 	 *
   6476 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6477 	 * don't enable that feature.
   6478 	 */
   6479 	sc->sc_mchash_type = 0;
   6480 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6481 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6482 
   6483 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6484 	if (sc->sc_type == WM_T_82574)
   6485 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6486 
   6487 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6488 		sc->sc_rctl |= RCTL_SECRC;
   6489 
   6490 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6491 	    && (ifp->if_mtu > ETHERMTU)) {
   6492 		sc->sc_rctl |= RCTL_LPE;
   6493 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6494 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6495 	}
   6496 
   6497 	if (MCLBYTES == 2048)
   6498 		sc->sc_rctl |= RCTL_2k;
   6499 	else {
   6500 		if (sc->sc_type >= WM_T_82543) {
   6501 			switch (MCLBYTES) {
   6502 			case 4096:
   6503 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6504 				break;
   6505 			case 8192:
   6506 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6507 				break;
   6508 			case 16384:
   6509 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6510 				break;
   6511 			default:
   6512 				panic("wm_init: MCLBYTES %d unsupported",
   6513 				    MCLBYTES);
   6514 				break;
   6515 			}
   6516 		} else
   6517 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6518 	}
   6519 
   6520 	/* Enable ECC */
   6521 	switch (sc->sc_type) {
   6522 	case WM_T_82571:
   6523 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6524 		reg |= PBA_ECC_CORR_EN;
   6525 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6526 		break;
   6527 	case WM_T_PCH_LPT:
   6528 	case WM_T_PCH_SPT:
   6529 	case WM_T_PCH_CNP:
   6530 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6531 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6532 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6533 
   6534 		sc->sc_ctrl |= CTRL_MEHE;
   6535 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6536 		break;
   6537 	default:
   6538 		break;
   6539 	}
   6540 
   6541 	/*
   6542 	 * Set the receive filter.
   6543 	 *
   6544 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6545 	 * the setting of RCTL.EN in wm_set_filter()
   6546 	 */
   6547 	wm_set_filter(sc);
   6548 
   6549 	/* On 82575 and later, set RDT only if RX is enabled. */
   6550 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6551 		int qidx;
   6552 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6553 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6554 			for (i = 0; i < WM_NRXDESC; i++) {
   6555 				mutex_enter(rxq->rxq_lock);
   6556 				wm_init_rxdesc(rxq, i);
   6557 				mutex_exit(rxq->rxq_lock);
   6558 
   6559 			}
   6560 		}
   6561 	}
   6562 
   6563 	wm_unset_stopping_flags(sc);
   6564 
   6565 	/* Start the one second link check clock. */
   6566 	callout_schedule(&sc->sc_tick_ch, hz);
   6567 
   6568 	/* ...all done! */
   6569 	ifp->if_flags |= IFF_RUNNING;
   6570 
   6571  out:
   6572 	/* Save last flags for the callback */
   6573 	sc->sc_if_flags = ifp->if_flags;
   6574 	sc->sc_ec_capenable = ec->ec_capenable;
   6575 	if (error)
   6576 		log(LOG_ERR, "%s: interface not running\n",
   6577 		    device_xname(sc->sc_dev));
   6578 	return error;
   6579 }
   6580 
   6581 /*
   6582  * wm_stop:		[ifnet interface function]
   6583  *
   6584  *	Stop transmission on the interface.
   6585  */
   6586 static void
   6587 wm_stop(struct ifnet *ifp, int disable)
   6588 {
   6589 	struct wm_softc *sc = ifp->if_softc;
   6590 
   6591 	ASSERT_SLEEPABLE();
   6592 
   6593 	WM_CORE_LOCK(sc);
   6594 	wm_stop_locked(ifp, disable != 0, true);
   6595 	WM_CORE_UNLOCK(sc);
   6596 
   6597 	/*
   6598 	 * After wm_set_stopping_flags(), it is guaranteed that
   6599 	 * wm_handle_queue_work() does not call workqueue_enqueue().
   6600 	 * However, workqueue_wait() cannot be called in
   6601 	 * wm_stop_locked() because it can sleep, so call
   6602 	 * workqueue_wait() here.
   6603 	 */
   6604 	for (int i = 0; i < sc->sc_nqueues; i++)
   6605 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6606 }
   6607 
   6608 static void
   6609 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6610 {
   6611 	struct wm_softc *sc = ifp->if_softc;
   6612 	struct wm_txsoft *txs;
   6613 	int i, qidx;
   6614 
   6615 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6616 		device_xname(sc->sc_dev), __func__));
   6617 	KASSERT(WM_CORE_LOCKED(sc));
   6618 
   6619 	wm_set_stopping_flags(sc);
   6620 
   6621 	if (sc->sc_flags & WM_F_HAS_MII) {
   6622 		/* Down the MII. */
   6623 		mii_down(&sc->sc_mii);
   6624 	} else {
   6625 #if 0
   6626 		/* Should we clear PHY's status properly? */
   6627 		wm_reset(sc);
   6628 #endif
   6629 	}
   6630 
   6631 	/* Stop the transmit and receive processes. */
   6632 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6633 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6634 	sc->sc_rctl &= ~RCTL_EN;
   6635 
   6636 	/*
   6637 	 * Clear the interrupt mask to ensure the device cannot assert its
   6638 	 * interrupt line.
   6639 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6640 	 * service any currently pending or shared interrupt.
   6641 	 */
   6642 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6643 	sc->sc_icr = 0;
   6644 	if (wm_is_using_msix(sc)) {
   6645 		if (sc->sc_type != WM_T_82574) {
   6646 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6647 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6648 		} else
   6649 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6650 	}
   6651 
   6652 	/*
   6653 	 * Stop callouts after interrupts are disabled; if we have
   6654 	 * to wait for them, we will be releasing the CORE_LOCK
   6655 	 * briefly, which will unblock interrupts on the current CPU.
   6656 	 */
   6657 
   6658 	/* Stop the one second clock. */
   6659 	if (wait)
   6660 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6661 	else
   6662 		callout_stop(&sc->sc_tick_ch);
   6663 
   6664 	/* Stop the 82547 Tx FIFO stall check timer. */
   6665 	if (sc->sc_type == WM_T_82547) {
   6666 		if (wait)
   6667 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6668 		else
   6669 			callout_stop(&sc->sc_txfifo_ch);
   6670 	}
   6671 
   6672 	/* Release any queued transmit buffers. */
   6673 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6674 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6675 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6676 		struct mbuf *m;
   6677 
   6678 		mutex_enter(txq->txq_lock);
   6679 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6680 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6681 			txs = &txq->txq_soft[i];
   6682 			if (txs->txs_mbuf != NULL) {
   6683 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6684 				m_freem(txs->txs_mbuf);
   6685 				txs->txs_mbuf = NULL;
   6686 			}
   6687 		}
   6688 		/* Drain txq_interq */
   6689 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6690 			m_freem(m);
   6691 		mutex_exit(txq->txq_lock);
   6692 	}
   6693 
   6694 	/* Mark the interface as down and cancel the watchdog timer. */
   6695 	ifp->if_flags &= ~IFF_RUNNING;
   6696 
   6697 	if (disable) {
   6698 		for (i = 0; i < sc->sc_nqueues; i++) {
   6699 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6700 			mutex_enter(rxq->rxq_lock);
   6701 			wm_rxdrain(rxq);
   6702 			mutex_exit(rxq->rxq_lock);
   6703 		}
   6704 	}
   6705 
   6706 #if 0 /* notyet */
   6707 	if (sc->sc_type >= WM_T_82544)
   6708 		CSR_WRITE(sc, WMREG_WUC, 0);
   6709 #endif
   6710 }
   6711 
   6712 static void
   6713 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6714 {
   6715 	struct mbuf *m;
   6716 	int i;
   6717 
   6718 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6719 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6720 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6721 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6722 		    m->m_data, m->m_len, m->m_flags);
   6723 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6724 	    i, i == 1 ? "" : "s");
   6725 }
   6726 
   6727 /*
   6728  * wm_82547_txfifo_stall:
   6729  *
   6730  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6731  *	reset the FIFO pointers, and restart packet transmission.
   6732  */
   6733 static void
   6734 wm_82547_txfifo_stall(void *arg)
   6735 {
   6736 	struct wm_softc *sc = arg;
   6737 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6738 
   6739 	mutex_enter(txq->txq_lock);
   6740 
   6741 	if (txq->txq_stopping)
   6742 		goto out;
   6743 
   6744 	if (txq->txq_fifo_stall) {
   6745 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6746 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6747 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6748 			/*
   6749 			 * Packets have drained.  Stop transmitter, reset
   6750 			 * FIFO pointers, restart transmitter, and kick
   6751 			 * the packet queue.
   6752 			 */
   6753 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6754 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6755 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6756 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6757 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6758 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6759 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6760 			CSR_WRITE_FLUSH(sc);
   6761 
   6762 			txq->txq_fifo_head = 0;
   6763 			txq->txq_fifo_stall = 0;
   6764 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6765 		} else {
   6766 			/*
   6767 			 * Still waiting for packets to drain; try again in
   6768 			 * another tick.
   6769 			 */
   6770 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6771 		}
   6772 	}
   6773 
   6774 out:
   6775 	mutex_exit(txq->txq_lock);
   6776 }
   6777 
   6778 /*
   6779  * wm_82547_txfifo_bugchk:
   6780  *
   6781  *	Check for the bug condition in the 82547 Tx FIFO.  We need to
   6782  *	prevent enqueueing a packet that would wrap around the end
   6783  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6784  *
   6785  *	We do this by checking the amount of space before the end
   6786  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6787  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6788  *	the internal FIFO pointers to the beginning, and restart
   6789  *	transmission on the interface.
   6790  */
   6791 #define	WM_FIFO_HDR		0x10
   6792 #define	WM_82547_PAD_LEN	0x3e0
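
        /*
         * A worked example of the check below, with illustrative numbers:
         * with a 16 KB FIFO and txq_fifo_head at 0x3f00, space is 0x100
         * bytes; a 1232-byte packet rounds up to len 0x4e0, which
         * satisfies len >= WM_82547_PAD_LEN + space (0x3e0 + 0x100),
         * so we stall until the FIFO drains.
         */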
   6793 static int
   6794 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6795 {
   6796 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6797 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6798 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6799 
   6800 	/* Just return if already stalled. */
   6801 	if (txq->txq_fifo_stall)
   6802 		return 1;
   6803 
   6804 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6805 		/* Stall only occurs in half-duplex mode. */
   6806 		goto send_packet;
   6807 	}
   6808 
   6809 	if (len >= WM_82547_PAD_LEN + space) {
   6810 		txq->txq_fifo_stall = 1;
   6811 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6812 		return 1;
   6813 	}
   6814 
   6815  send_packet:
   6816 	txq->txq_fifo_head += len;
   6817 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6818 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6819 
   6820 	return 0;
   6821 }
   6822 
   6823 static int
   6824 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6825 {
   6826 	int error;
   6827 
   6828 	/*
   6829 	 * Allocate the control data structures, and create and load the
   6830 	 * DMA map for it.
   6831 	 *
   6832 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6833 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6834 	 * both sets within the same 4G segment.
   6835 	 */
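        	/*
        	 * The 4G constraint is enforced below by passing
        	 * 0x100000000ULL as the boundary argument of
        	 * bus_dmamem_alloc(), so no allocation crosses a 4G line.
        	 */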
   6836 	if (sc->sc_type < WM_T_82544)
   6837 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6838 	else
   6839 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6840 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6841 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6842 	else
   6843 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6844 
   6845 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6846 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6847 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6848 		aprint_error_dev(sc->sc_dev,
   6849 		    "unable to allocate TX control data, error = %d\n",
   6850 		    error);
   6851 		goto fail_0;
   6852 	}
   6853 
   6854 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6855 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6856 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6857 		aprint_error_dev(sc->sc_dev,
   6858 		    "unable to map TX control data, error = %d\n", error);
   6859 		goto fail_1;
   6860 	}
   6861 
   6862 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6863 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6864 		aprint_error_dev(sc->sc_dev,
   6865 		    "unable to create TX control data DMA map, error = %d\n",
   6866 		    error);
   6867 		goto fail_2;
   6868 	}
   6869 
   6870 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6871 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6872 		aprint_error_dev(sc->sc_dev,
   6873 		    "unable to load TX control data DMA map, error = %d\n",
   6874 		    error);
   6875 		goto fail_3;
   6876 	}
   6877 
   6878 	return 0;
   6879 
   6880  fail_3:
   6881 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6882  fail_2:
   6883 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6884 	    WM_TXDESCS_SIZE(txq));
   6885  fail_1:
   6886 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6887  fail_0:
   6888 	return error;
   6889 }
   6890 
   6891 static void
   6892 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6893 {
   6894 
   6895 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6896 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6897 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6898 	    WM_TXDESCS_SIZE(txq));
   6899 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6900 }
   6901 
   6902 static int
   6903 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6904 {
   6905 	int error;
   6906 	size_t rxq_descs_size;
   6907 
   6908 	/*
   6909 	 * Allocate the control data structures, and create and load the
   6910 	 * DMA map for it.
   6911 	 *
   6912 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6913 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6914 	 * both sets within the same 4G segment.
   6915 	 */
   6916 	rxq->rxq_ndesc = WM_NRXDESC;
   6917 	if (sc->sc_type == WM_T_82574)
   6918 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6919 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6920 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6921 	else
   6922 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6923 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6924 
   6925 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6926 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6927 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6928 		aprint_error_dev(sc->sc_dev,
   6929 		    "unable to allocate RX control data, error = %d\n",
   6930 		    error);
   6931 		goto fail_0;
   6932 	}
   6933 
   6934 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6935 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6936 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6937 		aprint_error_dev(sc->sc_dev,
   6938 		    "unable to map RX control data, error = %d\n", error);
   6939 		goto fail_1;
   6940 	}
   6941 
   6942 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6943 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6944 		aprint_error_dev(sc->sc_dev,
   6945 		    "unable to create RX control data DMA map, error = %d\n",
   6946 		    error);
   6947 		goto fail_2;
   6948 	}
   6949 
   6950 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6951 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6952 		aprint_error_dev(sc->sc_dev,
   6953 		    "unable to load RX control data DMA map, error = %d\n",
   6954 		    error);
   6955 		goto fail_3;
   6956 	}
   6957 
   6958 	return 0;
   6959 
   6960  fail_3:
   6961 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6962  fail_2:
   6963 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6964 	    rxq_descs_size);
   6965  fail_1:
   6966 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6967  fail_0:
   6968 	return error;
   6969 }
   6970 
   6971 static void
   6972 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6973 {
   6974 
   6975 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6976 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6977 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6978 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6979 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6980 }
   6981 
   6982 
   6983 static int
   6984 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6985 {
   6986 	int i, error;
   6987 
   6988 	/* Create the transmit buffer DMA maps. */
   6989 	WM_TXQUEUELEN(txq) =
   6990 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6991 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6992 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6993 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6994 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6995 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6996 			aprint_error_dev(sc->sc_dev,
   6997 			    "unable to create Tx DMA map %d, error = %d\n",
   6998 			    i, error);
   6999 			goto fail;
   7000 		}
   7001 	}
   7002 
   7003 	return 0;
   7004 
   7005  fail:
   7006 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7007 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7008 			bus_dmamap_destroy(sc->sc_dmat,
   7009 			    txq->txq_soft[i].txs_dmamap);
   7010 	}
   7011 	return error;
   7012 }
   7013 
   7014 static void
   7015 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7016 {
   7017 	int i;
   7018 
   7019 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7020 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7021 			bus_dmamap_destroy(sc->sc_dmat,
   7022 			    txq->txq_soft[i].txs_dmamap);
   7023 	}
   7024 }
   7025 
   7026 static int
   7027 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7028 {
   7029 	int i, error;
   7030 
   7031 	/* Create the receive buffer DMA maps. */
   7032 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7033 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7034 			    MCLBYTES, 0, 0,
   7035 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7036 			aprint_error_dev(sc->sc_dev,
   7037 			    "unable to create Rx DMA map %d, error = %d\n",
   7038 			    i, error);
   7039 			goto fail;
   7040 		}
   7041 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7042 	}
   7043 
   7044 	return 0;
   7045 
   7046  fail:
   7047 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7048 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7049 			bus_dmamap_destroy(sc->sc_dmat,
   7050 			    rxq->rxq_soft[i].rxs_dmamap);
   7051 	}
   7052 	return error;
   7053 }
   7054 
   7055 static void
   7056 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7057 {
   7058 	int i;
   7059 
   7060 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7061 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7062 			bus_dmamap_destroy(sc->sc_dmat,
   7063 			    rxq->rxq_soft[i].rxs_dmamap);
   7064 	}
   7065 }
   7066 
   7067 /*
   7068  * wm_alloc_txrx_queues:
   7069  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   7070  */
   7071 static int
   7072 wm_alloc_txrx_queues(struct wm_softc *sc)
   7073 {
   7074 	int i, error, tx_done, rx_done;
   7075 
   7076 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7077 	    KM_SLEEP);
   7078 	if (sc->sc_queue == NULL) {
   7079 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7080 		error = ENOMEM;
   7081 		goto fail_0;
   7082 	}
   7083 
   7084 	/* For transmission */
   7085 	error = 0;
   7086 	tx_done = 0;
   7087 	for (i = 0; i < sc->sc_nqueues; i++) {
   7088 #ifdef WM_EVENT_COUNTERS
   7089 		int j;
   7090 		const char *xname;
   7091 #endif
   7092 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7093 		txq->txq_sc = sc;
   7094 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7095 
   7096 		error = wm_alloc_tx_descs(sc, txq);
   7097 		if (error)
   7098 			break;
   7099 		error = wm_alloc_tx_buffer(sc, txq);
   7100 		if (error) {
   7101 			wm_free_tx_descs(sc, txq);
   7102 			break;
   7103 		}
   7104 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7105 		if (txq->txq_interq == NULL) {
   7106 			wm_free_tx_descs(sc, txq);
   7107 			wm_free_tx_buffer(sc, txq);
   7108 			error = ENOMEM;
   7109 			break;
   7110 		}
   7111 
   7112 #ifdef WM_EVENT_COUNTERS
   7113 		xname = device_xname(sc->sc_dev);
   7114 
   7115 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7116 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7117 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7118 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7119 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7120 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7121 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7122 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7123 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7124 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7125 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7126 
   7127 		for (j = 0; j < WM_NTXSEGS; j++) {
   7128 			snprintf(txq->txq_txseg_evcnt_names[j],
   7129 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   7130 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   7131 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7132 		}
   7133 
   7134 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7135 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7136 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7137 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7138 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7139 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7140 #endif /* WM_EVENT_COUNTERS */
   7141 
   7142 		tx_done++;
   7143 	}
   7144 	if (error)
   7145 		goto fail_1;
   7146 
   7147 	/* For receive */
   7148 	error = 0;
   7149 	rx_done = 0;
   7150 	for (i = 0; i < sc->sc_nqueues; i++) {
   7151 #ifdef WM_EVENT_COUNTERS
   7152 		const char *xname;
   7153 #endif
   7154 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7155 		rxq->rxq_sc = sc;
   7156 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7157 
   7158 		error = wm_alloc_rx_descs(sc, rxq);
   7159 		if (error)
   7160 			break;
   7161 
   7162 		error = wm_alloc_rx_buffer(sc, rxq);
   7163 		if (error) {
   7164 			wm_free_rx_descs(sc, rxq);
   7165 			break;
   7166 		}
   7167 
   7168 #ifdef WM_EVENT_COUNTERS
   7169 		xname = device_xname(sc->sc_dev);
   7170 
   7171 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7172 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7173 
   7174 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7175 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7176 #endif /* WM_EVENT_COUNTERS */
   7177 
   7178 		rx_done++;
   7179 	}
   7180 	if (error)
   7181 		goto fail_2;
   7182 
   7183 	return 0;
   7184 
   7185  fail_2:
   7186 	for (i = 0; i < rx_done; i++) {
   7187 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7188 		wm_free_rx_buffer(sc, rxq);
   7189 		wm_free_rx_descs(sc, rxq);
   7190 		if (rxq->rxq_lock)
   7191 			mutex_obj_free(rxq->rxq_lock);
   7192 	}
   7193  fail_1:
   7194 	for (i = 0; i < tx_done; i++) {
   7195 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7196 		pcq_destroy(txq->txq_interq);
   7197 		wm_free_tx_buffer(sc, txq);
   7198 		wm_free_tx_descs(sc, txq);
   7199 		if (txq->txq_lock)
   7200 			mutex_obj_free(txq->txq_lock);
   7201 	}
   7202 
   7203 	kmem_free(sc->sc_queue,
   7204 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7205  fail_0:
   7206 	return error;
   7207 }
   7208 
   7209 /*
   7210  * wm_free_txrx_queues:
   7211  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7212  */
   7213 static void
   7214 wm_free_txrx_queues(struct wm_softc *sc)
   7215 {
   7216 	int i;
   7217 
   7218 	for (i = 0; i < sc->sc_nqueues; i++) {
   7219 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7220 
   7221 #ifdef WM_EVENT_COUNTERS
   7222 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7223 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7224 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7225 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7226 #endif /* WM_EVENT_COUNTERS */
   7227 
   7228 		wm_free_rx_buffer(sc, rxq);
   7229 		wm_free_rx_descs(sc, rxq);
   7230 		if (rxq->rxq_lock)
   7231 			mutex_obj_free(rxq->rxq_lock);
   7232 	}
   7233 
   7234 	for (i = 0; i < sc->sc_nqueues; i++) {
   7235 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7236 		struct mbuf *m;
   7237 #ifdef WM_EVENT_COUNTERS
   7238 		int j;
   7239 
   7240 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7241 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7242 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7243 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7244 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7245 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7246 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7247 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7248 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7249 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7250 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7251 
   7252 		for (j = 0; j < WM_NTXSEGS; j++)
   7253 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7254 
   7255 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7256 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7257 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7258 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7259 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7260 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7261 #endif /* WM_EVENT_COUNTERS */
   7262 
   7263 		/* Drain txq_interq */
   7264 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7265 			m_freem(m);
   7266 		pcq_destroy(txq->txq_interq);
   7267 
   7268 		wm_free_tx_buffer(sc, txq);
   7269 		wm_free_tx_descs(sc, txq);
   7270 		if (txq->txq_lock)
   7271 			mutex_obj_free(txq->txq_lock);
   7272 	}
   7273 
   7274 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7275 }
   7276 
   7277 static void
   7278 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7279 {
   7280 
   7281 	KASSERT(mutex_owned(txq->txq_lock));
   7282 
   7283 	/* Initialize the transmit descriptor ring. */
   7284 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7285 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7286 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7287 	txq->txq_free = WM_NTXDESC(txq);
   7288 	txq->txq_next = 0;
   7289 }
   7290 
   7291 static void
   7292 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7293     struct wm_txqueue *txq)
   7294 {
   7295 
   7296 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7297 		device_xname(sc->sc_dev), __func__));
   7298 	KASSERT(mutex_owned(txq->txq_lock));
   7299 
   7300 	if (sc->sc_type < WM_T_82543) {
   7301 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7302 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7303 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7304 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7305 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7306 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7307 	} else {
   7308 		int qid = wmq->wmq_id;
   7309 
   7310 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7311 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7312 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7313 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7314 
   7315 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7316 			/*
   7317 			 * Don't write TDT before TCTL.EN is set.
   7318 			 * See the documentation.
   7319 			 */
   7320 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7321 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7322 			    | TXDCTL_WTHRESH(0));
   7323 		else {
   7324 			/* XXX should update with AIM? */
   7325 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7326 			if (sc->sc_type >= WM_T_82540) {
   7327 				/* Should be the same */
   7328 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7329 			}
   7330 
   7331 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7332 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7333 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7334 		}
   7335 	}
   7336 }
   7337 
   7338 static void
   7339 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7340 {
   7341 	int i;
   7342 
   7343 	KASSERT(mutex_owned(txq->txq_lock));
   7344 
   7345 	/* Initialize the transmit job descriptors. */
   7346 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7347 		txq->txq_soft[i].txs_mbuf = NULL;
   7348 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7349 	txq->txq_snext = 0;
   7350 	txq->txq_sdirty = 0;
   7351 }
   7352 
   7353 static void
   7354 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7355     struct wm_txqueue *txq)
   7356 {
   7357 
   7358 	KASSERT(mutex_owned(txq->txq_lock));
   7359 
   7360 	/*
   7361 	 * Set up some register offsets that are different between
   7362 	 * the i82542 and the i82543 and later chips.
   7363 	 */
   7364 	if (sc->sc_type < WM_T_82543)
   7365 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7366 	else
   7367 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7368 
   7369 	wm_init_tx_descs(sc, txq);
   7370 	wm_init_tx_regs(sc, wmq, txq);
   7371 	wm_init_tx_buffer(sc, txq);
   7372 
   7373 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7374 	txq->txq_sending = false;
   7375 }
   7376 
   7377 static void
   7378 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7379     struct wm_rxqueue *rxq)
   7380 {
   7381 
   7382 	KASSERT(mutex_owned(rxq->rxq_lock));
   7383 
   7384 	/*
   7385 	 * Initialize the receive descriptor and receive job
   7386 	 * descriptor rings.
   7387 	 */
   7388 	if (sc->sc_type < WM_T_82543) {
   7389 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7390 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7391 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7392 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7393 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7394 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7395 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7396 
   7397 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7398 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7399 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7400 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7401 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7402 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7403 	} else {
   7404 		int qid = wmq->wmq_id;
   7405 
   7406 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7407 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7408 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7409 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7410 
   7411 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7412 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7413 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7414 
   7415 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported for now. */
   7416 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7417 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
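        			/*
        			 * Illustrative arithmetic (the shift value is an
        			 * assumption, not taken from this file): if
        			 * SRRCTL_BSIZEPKT_SHIFT is 10, i.e. 1 KB units,
        			 * then MCLBYTES of 2048 yields a buffer size
        			 * field of 2, and the check above enforces that
        			 * MCLBYTES is a whole number of those units.
        			 */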
   7418 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7419 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7420 			    | RXDCTL_WTHRESH(1));
   7421 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7422 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7423 		} else {
   7424 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7425 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7426 			/* XXX should update with AIM? */
   7427 			CSR_WRITE(sc, WMREG_RDTR,
   7428 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7429 			/* MUST be the same */
   7430 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7431 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7432 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7433 		}
   7434 	}
   7435 }
   7436 
   7437 static int
   7438 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7439 {
   7440 	struct wm_rxsoft *rxs;
   7441 	int error, i;
   7442 
   7443 	KASSERT(mutex_owned(rxq->rxq_lock));
   7444 
   7445 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7446 		rxs = &rxq->rxq_soft[i];
   7447 		if (rxs->rxs_mbuf == NULL) {
   7448 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7449 				log(LOG_ERR, "%s: unable to allocate or map "
   7450 				    "rx buffer %d, error = %d\n",
   7451 				    device_xname(sc->sc_dev), i, error);
   7452 				/*
   7453 				 * XXX Should attempt to run with fewer receive
   7454 				 * XXX buffers instead of just failing.
   7455 				 */
   7456 				wm_rxdrain(rxq);
   7457 				return ENOMEM;
   7458 			}
   7459 		} else {
   7460 			/*
   7461 			 * For 82575 and 82576, the RX descriptors must be
   7462 			 * initialized after the setting of RCTL.EN in
   7463 			 * wm_set_filter()
   7464 			 */
   7465 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7466 				wm_init_rxdesc(rxq, i);
   7467 		}
   7468 	}
   7469 	rxq->rxq_ptr = 0;
   7470 	rxq->rxq_discard = 0;
   7471 	WM_RXCHAIN_RESET(rxq);
   7472 
   7473 	return 0;
   7474 }
   7475 
   7476 static int
   7477 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7478     struct wm_rxqueue *rxq)
   7479 {
   7480 
   7481 	KASSERT(mutex_owned(rxq->rxq_lock));
   7482 
   7483 	/*
   7484 	 * Set up some register offsets that are different between
   7485 	 * the i82542 and the i82543 and later chips.
   7486 	 */
   7487 	if (sc->sc_type < WM_T_82543)
   7488 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7489 	else
   7490 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7491 
   7492 	wm_init_rx_regs(sc, wmq, rxq);
   7493 	return wm_init_rx_buffer(sc, rxq);
   7494 }
   7495 
   7496 /*
   7497  * wm_init_txrx_queues:
   7498  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7499  */
   7500 static int
   7501 wm_init_txrx_queues(struct wm_softc *sc)
   7502 {
   7503 	int i, error = 0;
   7504 
   7505 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7506 		device_xname(sc->sc_dev), __func__));
   7507 
   7508 	for (i = 0; i < sc->sc_nqueues; i++) {
   7509 		struct wm_queue *wmq = &sc->sc_queue[i];
   7510 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7511 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7512 
   7513 		/*
   7514 		 * TODO
   7515 		 * Currently, we use a constant value instead of AIM.
   7516 		 * Furthermore, the interrupt interval of multiqueue,
   7517 		 * which uses polling mode, is less than the default value.
   7518 		 * More tuning and AIM are required.
   7519 		 */
   7520 		if (wm_is_using_multiqueue(sc))
   7521 			wmq->wmq_itr = 50;
   7522 		else
   7523 			wmq->wmq_itr = sc->sc_itr_init;
   7524 		wmq->wmq_set_itr = true;
   7525 
   7526 		mutex_enter(txq->txq_lock);
   7527 		wm_init_tx_queue(sc, wmq, txq);
   7528 		mutex_exit(txq->txq_lock);
   7529 
   7530 		mutex_enter(rxq->rxq_lock);
   7531 		error = wm_init_rx_queue(sc, wmq, rxq);
   7532 		mutex_exit(rxq->rxq_lock);
   7533 		if (error)
   7534 			break;
   7535 	}
   7536 
   7537 	return error;
   7538 }
   7539 
   7540 /*
   7541  * wm_tx_offload:
   7542  *
   7543  *	Set up TCP/IP checksumming parameters for the
   7544  *	specified packet.
   7545  */
   7546 static void
   7547 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7548     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7549 {
   7550 	struct mbuf *m0 = txs->txs_mbuf;
   7551 	struct livengood_tcpip_ctxdesc *t;
   7552 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7553 	uint32_t ipcse;
   7554 	struct ether_header *eh;
   7555 	int offset, iphl;
   7556 	uint8_t fields;
   7557 
   7558 	/*
   7559 	 * XXX It would be nice if the mbuf pkthdr had offset
   7560 	 * fields for the protocol headers.
   7561 	 */
   7562 
   7563 	eh = mtod(m0, struct ether_header *);
   7564 	switch (ntohs(eh->ether_type)) {
   7565 	case ETHERTYPE_IP:
   7566 	case ETHERTYPE_IPV6:
   7567 		offset = ETHER_HDR_LEN;
   7568 		break;
   7569 
   7570 	case ETHERTYPE_VLAN:
   7571 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7572 		break;
   7573 
   7574 	default:
   7575 		/* Don't support this protocol or encapsulation. */
   7576 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7577 		txq->txq_last_hw_ipcs = 0;
   7578 		txq->txq_last_hw_tucs = 0;
   7579 		*fieldsp = 0;
   7580 		*cmdp = 0;
   7581 		return;
   7582 	}
   7583 
   7584 	if ((m0->m_pkthdr.csum_flags &
   7585 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7586 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7587 	} else
   7588 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7589 
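        	/* IPCSE is the inclusive offset of the last IP header byte. */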
   7590 	ipcse = offset + iphl - 1;
   7591 
   7592 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7593 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7594 	seg = 0;
   7595 	fields = 0;
   7596 
   7597 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7598 		int hlen = offset + iphl;
   7599 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7600 
   7601 		if (__predict_false(m0->m_len <
   7602 				    (hlen + sizeof(struct tcphdr)))) {
   7603 			/*
   7604 			 * TCP/IP headers are not in the first mbuf; we need
   7605 			 * to do this the slow and painful way. Let's just
   7606 			 * hope this doesn't happen very often.
   7607 			 */
   7608 			struct tcphdr th;
   7609 
   7610 			WM_Q_EVCNT_INCR(txq, tsopain);
   7611 
   7612 			m_copydata(m0, hlen, sizeof(th), &th);
   7613 			if (v4) {
   7614 				struct ip ip;
   7615 
   7616 				m_copydata(m0, offset, sizeof(ip), &ip);
   7617 				ip.ip_len = 0;
   7618 				m_copyback(m0,
   7619 				    offset + offsetof(struct ip, ip_len),
   7620 				    sizeof(ip.ip_len), &ip.ip_len);
   7621 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7622 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7623 			} else {
   7624 				struct ip6_hdr ip6;
   7625 
   7626 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7627 				ip6.ip6_plen = 0;
   7628 				m_copyback(m0,
   7629 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7630 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7631 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7632 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7633 			}
   7634 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7635 			    sizeof(th.th_sum), &th.th_sum);
   7636 
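        			/* th_off is in 32-bit words; << 2 gives bytes. */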
   7637 			hlen += th.th_off << 2;
   7638 		} else {
   7639 			/*
   7640 			 * TCP/IP headers are in the first mbuf; we can do
   7641 			 * this the easy way.
   7642 			 */
   7643 			struct tcphdr *th;
   7644 
   7645 			if (v4) {
   7646 				struct ip *ip =
   7647 				    (void *)(mtod(m0, char *) + offset);
   7648 				th = (void *)(mtod(m0, char *) + hlen);
   7649 
   7650 				ip->ip_len = 0;
   7651 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7652 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7653 			} else {
   7654 				struct ip6_hdr *ip6 =
   7655 				    (void *)(mtod(m0, char *) + offset);
   7656 				th = (void *)(mtod(m0, char *) + hlen);
   7657 
   7658 				ip6->ip6_plen = 0;
   7659 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7660 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7661 			}
   7662 			hlen += th->th_off << 2;
   7663 		}
   7664 
   7665 		if (v4) {
   7666 			WM_Q_EVCNT_INCR(txq, tso);
   7667 			cmdlen |= WTX_TCPIP_CMD_IP;
   7668 		} else {
   7669 			WM_Q_EVCNT_INCR(txq, tso6);
   7670 			ipcse = 0;
   7671 		}
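        		/*
        		 * The descriptor's length field below carries the TSO
        		 * payload size (m0->m_pkthdr.len - hlen; the headers
        		 * occupy the first hlen bytes), and SEG carries the
        		 * header length and the MSS used to slice the payload.
        		 */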
   7672 		cmd |= WTX_TCPIP_CMD_TSE;
   7673 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7674 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7675 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7676 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7677 	}
   7678 
   7679 	/*
   7680 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7681 	 * offload feature, if we load the context descriptor, we
   7682 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7683 	 */
   7684 
   7685 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7686 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7687 	    WTX_TCPIP_IPCSE(ipcse);
   7688 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7689 		WM_Q_EVCNT_INCR(txq, ipsum);
   7690 		fields |= WTX_IXSM;
   7691 	}
   7692 
   7693 	offset += iphl;
   7694 
   7695 	if (m0->m_pkthdr.csum_flags &
   7696 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7697 		WM_Q_EVCNT_INCR(txq, tusum);
   7698 		fields |= WTX_TXSM;
   7699 		tucs = WTX_TCPIP_TUCSS(offset) |
   7700 		    WTX_TCPIP_TUCSO(offset +
   7701 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7702 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7703 	} else if ((m0->m_pkthdr.csum_flags &
   7704 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7705 		WM_Q_EVCNT_INCR(txq, tusum6);
   7706 		fields |= WTX_TXSM;
   7707 		tucs = WTX_TCPIP_TUCSS(offset) |
   7708 		    WTX_TCPIP_TUCSO(offset +
   7709 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7710 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7711 	} else {
   7712 		/* Just initialize it to a valid TCP context. */
   7713 		tucs = WTX_TCPIP_TUCSS(offset) |
   7714 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7715 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7716 	}
   7717 
   7718 	*cmdp = cmd;
   7719 	*fieldsp = fields;
   7720 
   7721 	/*
   7722 	 * We don't have to write a context descriptor for every packet,
   7723 	 * except on the 82574.  For the 82574, we must write a context
   7724 	 * descriptor for every packet when we use two descriptor queues.
   7725 	 *
   7726 	 * The 82574L can only remember the *last* context used
   7727 	 * regardless of the queue it was used for.  We cannot reuse
   7728 	 * contexts on this hardware platform and must generate a new
   7729 	 * context every time.  82574L hardware spec, section 7.2.6,
   7730 	 * second note.
   7731 	 */
   7732 	if (sc->sc_nqueues < 2) {
   7733 		/*
   7734 		 * Setting up a new checksum offload context for every
   7735 		 * frame takes a lot of processing time for the hardware.
   7736 		 * This also reduces performance a lot for small-sized
   7737 		 * frames, so avoid it if the driver can use a previously
   7738 		 * configured checksum offload context.
   7739 		 * For TSO, in theory we could reuse the same TSO context
   7740 		 * only if the frame is the same type (IP/TCP) and has the
   7741 		 * same MSS.  However, checking whether a frame has the
   7742 		 * same IP/TCP structure is hard, so just ignore that and
   7743 		 * always establish a new TSO context.
   7744 		 */
   7745 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7746 		    == 0) {
   7747 			if (txq->txq_last_hw_cmd == cmd &&
   7748 			    txq->txq_last_hw_fields == fields &&
   7749 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7750 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7751 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7752 				return;
   7753 			}
   7754 		}
   7755 
   7756 		txq->txq_last_hw_cmd = cmd;
   7757 		txq->txq_last_hw_fields = fields;
   7758 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7759 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7760 	}
   7761 
   7762 	/* Fill in the context descriptor. */
   7763 	t = (struct livengood_tcpip_ctxdesc *)
   7764 	    &txq->txq_descs[txq->txq_next];
   7765 	t->tcpip_ipcs = htole32(ipcs);
   7766 	t->tcpip_tucs = htole32(tucs);
   7767 	t->tcpip_cmdlen = htole32(cmdlen);
   7768 	t->tcpip_seg = htole32(seg);
   7769 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7770 
   7771 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7772 	txs->txs_ndesc++;
   7773 }
   7774 
   7775 static inline int
   7776 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7777 {
   7778 	struct wm_softc *sc = ifp->if_softc;
   7779 	u_int cpuid = cpu_index(curcpu());
   7780 
   7781 	/*
   7782 	 * Currently, a simple distribution strategy.
   7783 	 * TODO:
   7784 	 * Distribute by flowid (RSS hash value).
   7785 	 */
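        	/*
        	 * A worked example with illustrative numbers: with ncpu = 4,
        	 * sc_affinity_offset = 1 and sc_nqueues = 2, CPUs 0-3 map to
        	 * queues 1, 0, 1, 0; e.g. for cpuid 2,
        	 * ((2 + 4 - 1) % 4) % 2 = (5 % 4) % 2 = 1.
        	 */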
   7786 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7787 }
   7788 
   7789 static inline bool
   7790 wm_linkdown_discard(struct wm_txqueue *txq)
   7791 {
   7792 
   7793 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7794 		return true;
   7795 
   7796 	return false;
   7797 }
   7798 
   7799 /*
   7800  * wm_start:		[ifnet interface function]
   7801  *
   7802  *	Start packet transmission on the interface.
   7803  */
   7804 static void
   7805 wm_start(struct ifnet *ifp)
   7806 {
   7807 	struct wm_softc *sc = ifp->if_softc;
   7808 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7809 
   7810 #ifdef WM_MPSAFE
   7811 	KASSERT(if_is_mpsafe(ifp));
   7812 #endif
   7813 	/*
   7814 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7815 	 */
   7816 
   7817 	mutex_enter(txq->txq_lock);
   7818 	if (!txq->txq_stopping)
   7819 		wm_start_locked(ifp);
   7820 	mutex_exit(txq->txq_lock);
   7821 }
   7822 
   7823 static void
   7824 wm_start_locked(struct ifnet *ifp)
   7825 {
   7826 	struct wm_softc *sc = ifp->if_softc;
   7827 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7828 
   7829 	wm_send_common_locked(ifp, txq, false);
   7830 }
   7831 
   7832 static int
   7833 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7834 {
   7835 	int qid;
   7836 	struct wm_softc *sc = ifp->if_softc;
   7837 	struct wm_txqueue *txq;
   7838 
   7839 	qid = wm_select_txqueue(ifp, m);
   7840 	txq = &sc->sc_queue[qid].wmq_txq;
   7841 
   7842 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7843 		m_freem(m);
   7844 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7845 		return ENOBUFS;
   7846 	}
   7847 
   7848 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7849 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7850 	if (m->m_flags & M_MCAST)
   7851 		if_statinc_ref(nsr, if_omcasts);
   7852 	IF_STAT_PUTREF(ifp);
   7853 
   7854 	if (mutex_tryenter(txq->txq_lock)) {
   7855 		if (!txq->txq_stopping)
   7856 			wm_transmit_locked(ifp, txq);
   7857 		mutex_exit(txq->txq_lock);
   7858 	}
   7859 
   7860 	return 0;
   7861 }
   7862 
   7863 static void
   7864 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7865 {
   7866 
   7867 	wm_send_common_locked(ifp, txq, true);
   7868 }
   7869 
   7870 static void
   7871 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7872     bool is_transmit)
   7873 {
   7874 	struct wm_softc *sc = ifp->if_softc;
   7875 	struct mbuf *m0;
   7876 	struct wm_txsoft *txs;
   7877 	bus_dmamap_t dmamap;
   7878 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7879 	bus_addr_t curaddr;
   7880 	bus_size_t seglen, curlen;
   7881 	uint32_t cksumcmd;
   7882 	uint8_t cksumfields;
   7883 	bool remap = true;
   7884 
   7885 	KASSERT(mutex_owned(txq->txq_lock));
   7886 
   7887 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7888 		return;
   7889 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7890 		return;
   7891 
   7892 	if (__predict_false(wm_linkdown_discard(txq))) {
   7893 		do {
   7894 			if (is_transmit)
   7895 				m0 = pcq_get(txq->txq_interq);
   7896 			else
   7897 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   7898 			/*
    7899 			 * Increment the output packet counter, just as if
    7900 			 * the packet had been dropped by the link-down PHY.
   7901 			 */
   7902 			if (m0 != NULL)
   7903 				if_statinc(ifp, if_opackets);
   7904 			m_freem(m0);
   7905 		} while (m0 != NULL);
   7906 		return;
   7907 	}
   7908 
   7909 	/* Remember the previous number of free descriptors. */
   7910 	ofree = txq->txq_free;
   7911 
   7912 	/*
   7913 	 * Loop through the send queue, setting up transmit descriptors
   7914 	 * until we drain the queue, or use up all available transmit
   7915 	 * descriptors.
   7916 	 */
   7917 	for (;;) {
   7918 		m0 = NULL;
   7919 
   7920 		/* Get a work queue entry. */
   7921 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7922 			wm_txeof(txq, UINT_MAX);
   7923 			if (txq->txq_sfree == 0) {
   7924 				DPRINTF(sc, WM_DEBUG_TX,
   7925 				    ("%s: TX: no free job descriptors\n",
   7926 					device_xname(sc->sc_dev)));
   7927 				WM_Q_EVCNT_INCR(txq, txsstall);
   7928 				break;
   7929 			}
   7930 		}
   7931 
   7932 		/* Grab a packet off the queue. */
   7933 		if (is_transmit)
   7934 			m0 = pcq_get(txq->txq_interq);
   7935 		else
   7936 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7937 		if (m0 == NULL)
   7938 			break;
   7939 
   7940 		DPRINTF(sc, WM_DEBUG_TX,
   7941 		    ("%s: TX: have packet to transmit: %p\n",
   7942 			device_xname(sc->sc_dev), m0));
   7943 
   7944 		txs = &txq->txq_soft[txq->txq_snext];
   7945 		dmamap = txs->txs_dmamap;
   7946 
   7947 		use_tso = (m0->m_pkthdr.csum_flags &
   7948 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7949 
   7950 		/*
   7951 		 * So says the Linux driver:
   7952 		 * The controller does a simple calculation to make sure
   7953 		 * there is enough room in the FIFO before initiating the
   7954 		 * DMA for each buffer. The calc is:
   7955 		 *	4 = ceil(buffer len / MSS)
   7956 		 * To make sure we don't overrun the FIFO, adjust the max
   7957 		 * buffer len if the MSS drops.
   7958 		 */
   7959 		dmamap->dm_maxsegsz =
   7960 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7961 		    ? m0->m_pkthdr.segsz << 2
   7962 		    : WTX_MAX_LEN;
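         		/*
         		 * Worked example of the cap above (values hypothetical):
         		 * with an MSS of 536, dm_maxsegsz becomes 536 << 2 = 2144
         		 * bytes, so a single DMA buffer never spans more than
         		 * four MSS-sized FIFO chunks. Once segsz << 2 reaches
         		 * WTX_MAX_LEN, the generic WTX_MAX_LEN cap applies
         		 * instead.
         		 */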
   7963 
   7964 		/*
   7965 		 * Load the DMA map.  If this fails, the packet either
   7966 		 * didn't fit in the allotted number of segments, or we
   7967 		 * were short on resources.  For the too-many-segments
   7968 		 * case, we simply report an error and drop the packet,
   7969 		 * since we can't sanely copy a jumbo packet to a single
   7970 		 * buffer.
   7971 		 */
   7972 retry:
   7973 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7974 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7975 		if (__predict_false(error)) {
   7976 			if (error == EFBIG) {
   7977 				if (remap == true) {
   7978 					struct mbuf *m;
   7979 
   7980 					remap = false;
   7981 					m = m_defrag(m0, M_NOWAIT);
   7982 					if (m != NULL) {
   7983 						WM_Q_EVCNT_INCR(txq, defrag);
   7984 						m0 = m;
   7985 						goto retry;
   7986 					}
   7987 				}
   7988 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7989 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7990 				    "DMA segments, dropping...\n",
   7991 				    device_xname(sc->sc_dev));
   7992 				wm_dump_mbuf_chain(sc, m0);
   7993 				m_freem(m0);
   7994 				continue;
   7995 			}
   7996 			/* Short on resources, just stop for now. */
   7997 			DPRINTF(sc, WM_DEBUG_TX,
   7998 			    ("%s: TX: dmamap load failed: %d\n",
   7999 				device_xname(sc->sc_dev), error));
   8000 			break;
   8001 		}
   8002 
   8003 		segs_needed = dmamap->dm_nsegs;
   8004 		if (use_tso) {
   8005 			/* For sentinel descriptor; see below. */
   8006 			segs_needed++;
   8007 		}
   8008 
   8009 		/*
   8010 		 * Ensure we have enough descriptors free to describe
   8011 		 * the packet. Note, we always reserve one descriptor
   8012 		 * at the end of the ring due to the semantics of the
   8013 		 * TDT register, plus one more in the event we need
   8014 		 * to load offload context.
   8015 		 */
   8016 		if (segs_needed > txq->txq_free - 2) {
   8017 			/*
   8018 			 * Not enough free descriptors to transmit this
   8019 			 * packet.  We haven't committed anything yet,
   8020 			 * so just unload the DMA map, put the packet
    8021 			 * back on the queue, and punt. Notify the upper
   8022 			 * layer that there are no more slots left.
   8023 			 */
   8024 			DPRINTF(sc, WM_DEBUG_TX,
   8025 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8026 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8027 				segs_needed, txq->txq_free - 1));
   8028 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8029 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8030 			WM_Q_EVCNT_INCR(txq, txdstall);
   8031 			break;
   8032 		}
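         		/*
         		 * Example of the arithmetic above: with txq_free == 10,
         		 * a packet may use at most 8 descriptors here; one slot
         		 * stays empty so TDT never catches up with TDH, and one
         		 * more is reserved for a possible offload context
         		 * descriptor.
         		 */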
   8033 
   8034 		/*
   8035 		 * Check for 82547 Tx FIFO bug. We need to do this
   8036 		 * once we know we can transmit the packet, since we
   8037 		 * do some internal FIFO space accounting here.
   8038 		 */
   8039 		if (sc->sc_type == WM_T_82547 &&
   8040 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8041 			DPRINTF(sc, WM_DEBUG_TX,
   8042 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8043 				device_xname(sc->sc_dev)));
   8044 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8045 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8046 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8047 			break;
   8048 		}
   8049 
   8050 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8051 
   8052 		DPRINTF(sc, WM_DEBUG_TX,
   8053 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8054 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8055 
   8056 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8057 
   8058 		/*
   8059 		 * Store a pointer to the packet so that we can free it
   8060 		 * later.
   8061 		 *
   8062 		 * Initially, we consider the number of descriptors the
   8063 		 * packet uses the number of DMA segments.  This may be
   8064 		 * incremented by 1 if we do checksum offload (a descriptor
   8065 		 * is used to set the checksum context).
   8066 		 */
   8067 		txs->txs_mbuf = m0;
   8068 		txs->txs_firstdesc = txq->txq_next;
   8069 		txs->txs_ndesc = segs_needed;
   8070 
   8071 		/* Set up offload parameters for this packet. */
   8072 		if (m0->m_pkthdr.csum_flags &
   8073 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8074 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8075 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8076 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8077 		} else {
   8078 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8079 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8080 			cksumcmd = 0;
   8081 			cksumfields = 0;
   8082 		}
   8083 
   8084 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8085 
   8086 		/* Sync the DMA map. */
   8087 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8088 		    BUS_DMASYNC_PREWRITE);
   8089 
   8090 		/* Initialize the transmit descriptor. */
   8091 		for (nexttx = txq->txq_next, seg = 0;
   8092 		     seg < dmamap->dm_nsegs; seg++) {
   8093 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8094 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8095 			     seglen != 0;
   8096 			     curaddr += curlen, seglen -= curlen,
   8097 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8098 				curlen = seglen;
   8099 
   8100 				/*
   8101 				 * So says the Linux driver:
   8102 				 * Work around for premature descriptor
   8103 				 * write-backs in TSO mode.  Append a
   8104 				 * 4-byte sentinel descriptor.
   8105 				 */
   8106 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8107 				    curlen > 8)
   8108 					curlen -= 4;
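         				/*
         				 * Example: a final 64-byte segment
         				 * is written as a 60-byte descriptor
         				 * here, and the 4 leftover bytes
         				 * become the sentinel descriptor on
         				 * the next pass of this loop.
         				 */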
   8109 
   8110 				wm_set_dma_addr(
   8111 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8112 				txq->txq_descs[nexttx].wtx_cmdlen
   8113 				    = htole32(cksumcmd | curlen);
   8114 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8115 				    = 0;
   8116 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8117 				    = cksumfields;
   8118 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8119 				lasttx = nexttx;
   8120 
   8121 				DPRINTF(sc, WM_DEBUG_TX,
   8122 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8123 					"len %#04zx\n",
   8124 					device_xname(sc->sc_dev), nexttx,
   8125 					(uint64_t)curaddr, curlen));
   8126 			}
   8127 		}
   8128 
   8129 		KASSERT(lasttx != -1);
   8130 
   8131 		/*
   8132 		 * Set up the command byte on the last descriptor of
   8133 		 * the packet. If we're in the interrupt delay window,
   8134 		 * delay the interrupt.
   8135 		 */
   8136 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8137 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8138 
   8139 		/*
   8140 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8141 		 * up the descriptor to encapsulate the packet for us.
   8142 		 *
   8143 		 * This is only valid on the last descriptor of the packet.
   8144 		 */
   8145 		if (vlan_has_tag(m0)) {
   8146 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8147 			    htole32(WTX_CMD_VLE);
   8148 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8149 			    = htole16(vlan_get_tag(m0));
   8150 		}
   8151 
   8152 		txs->txs_lastdesc = lasttx;
   8153 
   8154 		DPRINTF(sc, WM_DEBUG_TX,
   8155 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8156 			device_xname(sc->sc_dev),
   8157 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8158 
   8159 		/* Sync the descriptors we're using. */
   8160 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8161 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8162 
   8163 		/* Give the packet to the chip. */
   8164 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8165 
   8166 		DPRINTF(sc, WM_DEBUG_TX,
   8167 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8168 
   8169 		DPRINTF(sc, WM_DEBUG_TX,
   8170 		    ("%s: TX: finished transmitting packet, job %d\n",
   8171 			device_xname(sc->sc_dev), txq->txq_snext));
   8172 
   8173 		/* Advance the tx pointer. */
   8174 		txq->txq_free -= txs->txs_ndesc;
   8175 		txq->txq_next = nexttx;
   8176 
   8177 		txq->txq_sfree--;
   8178 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8179 
   8180 		/* Pass the packet to any BPF listeners. */
   8181 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8182 	}
   8183 
   8184 	if (m0 != NULL) {
   8185 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8186 		WM_Q_EVCNT_INCR(txq, descdrop);
   8187 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8188 			__func__));
   8189 		m_freem(m0);
   8190 	}
   8191 
   8192 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8193 		/* No more slots; notify upper layer. */
   8194 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8195 	}
   8196 
   8197 	if (txq->txq_free != ofree) {
   8198 		/* Set a watchdog timer in case the chip flakes out. */
   8199 		txq->txq_lastsent = time_uptime;
   8200 		txq->txq_sending = true;
   8201 	}
   8202 }
   8203 
   8204 /*
   8205  * wm_nq_tx_offload:
   8206  *
   8207  *	Set up TCP/IP checksumming parameters for the
   8208  *	specified packet, for NEWQUEUE devices
   8209  */
   8210 static void
   8211 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8212     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8213 {
   8214 	struct mbuf *m0 = txs->txs_mbuf;
   8215 	uint32_t vl_len, mssidx, cmdc;
   8216 	struct ether_header *eh;
   8217 	int offset, iphl;
   8218 
   8219 	/*
   8220 	 * XXX It would be nice if the mbuf pkthdr had offset
   8221 	 * fields for the protocol headers.
   8222 	 */
   8223 	*cmdlenp = 0;
   8224 	*fieldsp = 0;
   8225 
   8226 	eh = mtod(m0, struct ether_header *);
   8227 	switch (htons(eh->ether_type)) {
   8228 	case ETHERTYPE_IP:
   8229 	case ETHERTYPE_IPV6:
   8230 		offset = ETHER_HDR_LEN;
   8231 		break;
   8232 
   8233 	case ETHERTYPE_VLAN:
   8234 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8235 		break;
   8236 
   8237 	default:
   8238 		/* Don't support this protocol or encapsulation. */
   8239 		*do_csum = false;
   8240 		return;
   8241 	}
   8242 	*do_csum = true;
   8243 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8244 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8245 
   8246 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8247 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8248 
   8249 	if ((m0->m_pkthdr.csum_flags &
   8250 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8251 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8252 	} else {
   8253 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8254 	}
   8255 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8256 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
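         	/*
         	 * Example packing: an untagged IPv4 frame with a standard
         	 * 20-byte IP header carries MACLEN = 14 and IPLEN = 20 in
         	 * vl_len at this point; a VLAN tag, when present, is added
         	 * to the VLAN field just below.
         	 */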
   8257 
   8258 	if (vlan_has_tag(m0)) {
   8259 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8260 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8261 		*cmdlenp |= NQTX_CMD_VLE;
   8262 	}
   8263 
   8264 	mssidx = 0;
   8265 
   8266 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8267 		int hlen = offset + iphl;
   8268 		int tcp_hlen;
   8269 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8270 
   8271 		if (__predict_false(m0->m_len <
   8272 				    (hlen + sizeof(struct tcphdr)))) {
   8273 			/*
   8274 			 * TCP/IP headers are not in the first mbuf; we need
   8275 			 * to do this the slow and painful way. Let's just
   8276 			 * hope this doesn't happen very often.
   8277 			 */
   8278 			struct tcphdr th;
   8279 
   8280 			WM_Q_EVCNT_INCR(txq, tsopain);
   8281 
   8282 			m_copydata(m0, hlen, sizeof(th), &th);
   8283 			if (v4) {
   8284 				struct ip ip;
   8285 
   8286 				m_copydata(m0, offset, sizeof(ip), &ip);
   8287 				ip.ip_len = 0;
   8288 				m_copyback(m0,
   8289 				    offset + offsetof(struct ip, ip_len),
   8290 				    sizeof(ip.ip_len), &ip.ip_len);
   8291 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8292 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8293 			} else {
   8294 				struct ip6_hdr ip6;
   8295 
   8296 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8297 				ip6.ip6_plen = 0;
   8298 				m_copyback(m0,
   8299 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8300 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8301 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8302 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8303 			}
   8304 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8305 			    sizeof(th.th_sum), &th.th_sum);
   8306 
   8307 			tcp_hlen = th.th_off << 2;
   8308 		} else {
   8309 			/*
   8310 			 * TCP/IP headers are in the first mbuf; we can do
   8311 			 * this the easy way.
   8312 			 */
   8313 			struct tcphdr *th;
   8314 
   8315 			if (v4) {
   8316 				struct ip *ip =
   8317 				    (void *)(mtod(m0, char *) + offset);
   8318 				th = (void *)(mtod(m0, char *) + hlen);
   8319 
   8320 				ip->ip_len = 0;
   8321 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8322 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8323 			} else {
   8324 				struct ip6_hdr *ip6 =
   8325 				    (void *)(mtod(m0, char *) + offset);
   8326 				th = (void *)(mtod(m0, char *) + hlen);
   8327 
   8328 				ip6->ip6_plen = 0;
   8329 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8330 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8331 			}
   8332 			tcp_hlen = th->th_off << 2;
   8333 		}
   8334 		hlen += tcp_hlen;
   8335 		*cmdlenp |= NQTX_CMD_TSE;
   8336 
   8337 		if (v4) {
   8338 			WM_Q_EVCNT_INCR(txq, tso);
   8339 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8340 		} else {
   8341 			WM_Q_EVCNT_INCR(txq, tso6);
   8342 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8343 		}
   8344 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8345 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8346 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8347 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8348 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8349 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8350 	} else {
   8351 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8352 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8353 	}
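         	/*
         	 * Worked TSO example (hypothetical sizes): a 64294-byte
         	 * packet with 14-byte Ethernet, 20-byte IPv4 and 20-byte TCP
         	 * headers gives hlen = 54, so PAYLEN becomes 64240, i.e. the
         	 * TCP payload alone, which the hardware then slices into
         	 * MSS-sized frames.
         	 */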
   8354 
   8355 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8356 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8357 		cmdc |= NQTXC_CMD_IP4;
   8358 	}
   8359 
   8360 	if (m0->m_pkthdr.csum_flags &
   8361 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8362 		WM_Q_EVCNT_INCR(txq, tusum);
   8363 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8364 			cmdc |= NQTXC_CMD_TCP;
   8365 		else
   8366 			cmdc |= NQTXC_CMD_UDP;
   8367 
   8368 		cmdc |= NQTXC_CMD_IP4;
   8369 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8370 	}
   8371 	if (m0->m_pkthdr.csum_flags &
   8372 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8373 		WM_Q_EVCNT_INCR(txq, tusum6);
   8374 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8375 			cmdc |= NQTXC_CMD_TCP;
   8376 		else
   8377 			cmdc |= NQTXC_CMD_UDP;
   8378 
   8379 		cmdc |= NQTXC_CMD_IP6;
   8380 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8381 	}
   8382 
   8383 	/*
    8384 	 * We don't have to write a context descriptor for every packet on
    8385 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    8386 	 * I354, I210 and I211. For these controllers, it is enough to
    8387 	 * write one context descriptor per Tx queue.
    8388 	 * Writing a context descriptor for every packet is overhead,
    8389 	 * but it does not cause problems.
   8390 	 */
   8391 	/* Fill in the context descriptor. */
   8392 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8393 	    htole32(vl_len);
   8394 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8395 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8396 	    htole32(cmdc);
   8397 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8398 	    htole32(mssidx);
   8399 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8400 	DPRINTF(sc, WM_DEBUG_TX,
   8401 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8402 		txq->txq_next, 0, vl_len));
   8403 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8404 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8405 	txs->txs_ndesc++;
   8406 }
   8407 
   8408 /*
   8409  * wm_nq_start:		[ifnet interface function]
   8410  *
   8411  *	Start packet transmission on the interface for NEWQUEUE devices
   8412  */
   8413 static void
   8414 wm_nq_start(struct ifnet *ifp)
   8415 {
   8416 	struct wm_softc *sc = ifp->if_softc;
   8417 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8418 
   8419 #ifdef WM_MPSAFE
   8420 	KASSERT(if_is_mpsafe(ifp));
   8421 #endif
   8422 	/*
   8423 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8424 	 */
   8425 
   8426 	mutex_enter(txq->txq_lock);
   8427 	if (!txq->txq_stopping)
   8428 		wm_nq_start_locked(ifp);
   8429 	mutex_exit(txq->txq_lock);
   8430 }
   8431 
   8432 static void
   8433 wm_nq_start_locked(struct ifnet *ifp)
   8434 {
   8435 	struct wm_softc *sc = ifp->if_softc;
   8436 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8437 
   8438 	wm_nq_send_common_locked(ifp, txq, false);
   8439 }
   8440 
   8441 static int
   8442 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8443 {
   8444 	int qid;
   8445 	struct wm_softc *sc = ifp->if_softc;
   8446 	struct wm_txqueue *txq;
   8447 
   8448 	qid = wm_select_txqueue(ifp, m);
   8449 	txq = &sc->sc_queue[qid].wmq_txq;
   8450 
   8451 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8452 		m_freem(m);
   8453 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8454 		return ENOBUFS;
   8455 	}
   8456 
   8457 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8458 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8459 	if (m->m_flags & M_MCAST)
   8460 		if_statinc_ref(nsr, if_omcasts);
   8461 	IF_STAT_PUTREF(ifp);
   8462 
   8463 	/*
    8464 	 * There are two situations in which this mutex_tryenter() can
    8465 	 * fail at run time:
    8466 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8467 	 *     (2) contention with the deferred if_start softint
    8468 	 *         (wm_handle_queue())
    8469 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8470 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8471 	 * In case (2), the last packet is likewise dequeued by
    8472 	 * wm_deferred_start_locked(), so it does not get stuck either.
   8473 	 */
   8474 	if (mutex_tryenter(txq->txq_lock)) {
   8475 		if (!txq->txq_stopping)
   8476 			wm_nq_transmit_locked(ifp, txq);
   8477 		mutex_exit(txq->txq_lock);
   8478 	}
   8479 
   8480 	return 0;
   8481 }
   8482 
   8483 static void
   8484 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8485 {
   8486 
   8487 	wm_nq_send_common_locked(ifp, txq, true);
   8488 }
   8489 
   8490 static void
   8491 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8492     bool is_transmit)
   8493 {
   8494 	struct wm_softc *sc = ifp->if_softc;
   8495 	struct mbuf *m0;
   8496 	struct wm_txsoft *txs;
   8497 	bus_dmamap_t dmamap;
   8498 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8499 	bool do_csum, sent;
   8500 	bool remap = true;
   8501 
   8502 	KASSERT(mutex_owned(txq->txq_lock));
   8503 
   8504 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8505 		return;
   8506 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8507 		return;
   8508 
   8509 	if (__predict_false(wm_linkdown_discard(txq))) {
   8510 		do {
   8511 			if (is_transmit)
   8512 				m0 = pcq_get(txq->txq_interq);
   8513 			else
   8514 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8515 			/*
    8516 			 * Increment the output packet counter, just as if
    8517 			 * the packet had been dropped by the link-down PHY.
   8518 			 */
   8519 			if (m0 != NULL)
   8520 				if_statinc(ifp, if_opackets);
   8521 			m_freem(m0);
   8522 		} while (m0 != NULL);
   8523 		return;
   8524 	}
   8525 
   8526 	sent = false;
   8527 
   8528 	/*
   8529 	 * Loop through the send queue, setting up transmit descriptors
   8530 	 * until we drain the queue, or use up all available transmit
   8531 	 * descriptors.
   8532 	 */
   8533 	for (;;) {
   8534 		m0 = NULL;
   8535 
   8536 		/* Get a work queue entry. */
   8537 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8538 			wm_txeof(txq, UINT_MAX);
   8539 			if (txq->txq_sfree == 0) {
   8540 				DPRINTF(sc, WM_DEBUG_TX,
   8541 				    ("%s: TX: no free job descriptors\n",
   8542 					device_xname(sc->sc_dev)));
   8543 				WM_Q_EVCNT_INCR(txq, txsstall);
   8544 				break;
   8545 			}
   8546 		}
   8547 
   8548 		/* Grab a packet off the queue. */
   8549 		if (is_transmit)
   8550 			m0 = pcq_get(txq->txq_interq);
   8551 		else
   8552 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8553 		if (m0 == NULL)
   8554 			break;
   8555 
   8556 		DPRINTF(sc, WM_DEBUG_TX,
   8557 		    ("%s: TX: have packet to transmit: %p\n",
   8558 		    device_xname(sc->sc_dev), m0));
   8559 
   8560 		txs = &txq->txq_soft[txq->txq_snext];
   8561 		dmamap = txs->txs_dmamap;
   8562 
   8563 		/*
   8564 		 * Load the DMA map.  If this fails, the packet either
   8565 		 * didn't fit in the allotted number of segments, or we
   8566 		 * were short on resources.  For the too-many-segments
   8567 		 * case, we simply report an error and drop the packet,
   8568 		 * since we can't sanely copy a jumbo packet to a single
   8569 		 * buffer.
   8570 		 */
   8571 retry:
   8572 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8573 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8574 		if (__predict_false(error)) {
   8575 			if (error == EFBIG) {
   8576 				if (remap == true) {
   8577 					struct mbuf *m;
   8578 
   8579 					remap = false;
   8580 					m = m_defrag(m0, M_NOWAIT);
   8581 					if (m != NULL) {
   8582 						WM_Q_EVCNT_INCR(txq, defrag);
   8583 						m0 = m;
   8584 						goto retry;
   8585 					}
   8586 				}
   8587 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8588 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8589 				    "DMA segments, dropping...\n",
   8590 				    device_xname(sc->sc_dev));
   8591 				wm_dump_mbuf_chain(sc, m0);
   8592 				m_freem(m0);
   8593 				continue;
   8594 			}
   8595 			/* Short on resources, just stop for now. */
   8596 			DPRINTF(sc, WM_DEBUG_TX,
   8597 			    ("%s: TX: dmamap load failed: %d\n",
   8598 				device_xname(sc->sc_dev), error));
   8599 			break;
   8600 		}
   8601 
   8602 		segs_needed = dmamap->dm_nsegs;
   8603 
   8604 		/*
   8605 		 * Ensure we have enough descriptors free to describe
   8606 		 * the packet. Note, we always reserve one descriptor
   8607 		 * at the end of the ring due to the semantics of the
   8608 		 * TDT register, plus one more in the event we need
   8609 		 * to load offload context.
   8610 		 */
   8611 		if (segs_needed > txq->txq_free - 2) {
   8612 			/*
   8613 			 * Not enough free descriptors to transmit this
   8614 			 * packet.  We haven't committed anything yet,
   8615 			 * so just unload the DMA map, put the packet
    8616 			 * back on the queue, and punt. Notify the upper
   8617 			 * layer that there are no more slots left.
   8618 			 */
   8619 			DPRINTF(sc, WM_DEBUG_TX,
   8620 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8621 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8622 				segs_needed, txq->txq_free - 1));
   8623 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8624 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8625 			WM_Q_EVCNT_INCR(txq, txdstall);
   8626 			break;
   8627 		}
   8628 
   8629 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8630 
   8631 		DPRINTF(sc, WM_DEBUG_TX,
   8632 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8633 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8634 
   8635 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8636 
   8637 		/*
   8638 		 * Store a pointer to the packet so that we can free it
   8639 		 * later.
   8640 		 *
   8641 		 * Initially, we consider the number of descriptors the
   8642 		 * packet uses the number of DMA segments.  This may be
   8643 		 * incremented by 1 if we do checksum offload (a descriptor
   8644 		 * is used to set the checksum context).
   8645 		 */
   8646 		txs->txs_mbuf = m0;
   8647 		txs->txs_firstdesc = txq->txq_next;
   8648 		txs->txs_ndesc = segs_needed;
   8649 
   8650 		/* Set up offload parameters for this packet. */
   8651 		uint32_t cmdlen, fields, dcmdlen;
   8652 		if (m0->m_pkthdr.csum_flags &
   8653 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8654 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8655 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8656 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8657 			    &do_csum);
   8658 		} else {
   8659 			do_csum = false;
   8660 			cmdlen = 0;
   8661 			fields = 0;
   8662 		}
   8663 
   8664 		/* Sync the DMA map. */
   8665 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8666 		    BUS_DMASYNC_PREWRITE);
   8667 
   8668 		/* Initialize the first transmit descriptor. */
   8669 		nexttx = txq->txq_next;
   8670 		if (!do_csum) {
   8671 			/* Setup a legacy descriptor */
   8672 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8673 			    dmamap->dm_segs[0].ds_addr);
   8674 			txq->txq_descs[nexttx].wtx_cmdlen =
   8675 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8676 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8677 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8678 			if (vlan_has_tag(m0)) {
   8679 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8680 				    htole32(WTX_CMD_VLE);
   8681 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8682 				    htole16(vlan_get_tag(m0));
   8683 			} else
   8684 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8685 
   8686 			dcmdlen = 0;
   8687 		} else {
   8688 			/* Setup an advanced data descriptor */
   8689 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8690 			    htole64(dmamap->dm_segs[0].ds_addr);
   8691 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8692 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8693 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8694 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8695 			    htole32(fields);
   8696 			DPRINTF(sc, WM_DEBUG_TX,
   8697 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8698 				device_xname(sc->sc_dev), nexttx,
   8699 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8700 			DPRINTF(sc, WM_DEBUG_TX,
   8701 			    ("\t 0x%08x%08x\n", fields,
   8702 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8703 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8704 		}
   8705 
   8706 		lasttx = nexttx;
   8707 		nexttx = WM_NEXTTX(txq, nexttx);
   8708 		/*
    8709 		 * Fill in the remaining descriptors. The legacy and
    8710 		 * advanced formats are identical from here on.
   8711 		 */
   8712 		for (seg = 1; seg < dmamap->dm_nsegs;
   8713 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8714 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8715 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8716 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8717 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8718 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8719 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8720 			lasttx = nexttx;
   8721 
   8722 			DPRINTF(sc, WM_DEBUG_TX,
   8723 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8724 				device_xname(sc->sc_dev), nexttx,
   8725 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8726 				dmamap->dm_segs[seg].ds_len));
   8727 		}
   8728 
   8729 		KASSERT(lasttx != -1);
   8730 
   8731 		/*
   8732 		 * Set up the command byte on the last descriptor of
   8733 		 * the packet. If we're in the interrupt delay window,
   8734 		 * delay the interrupt.
   8735 		 */
   8736 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8737 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8738 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8739 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8740 
   8741 		txs->txs_lastdesc = lasttx;
   8742 
   8743 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8744 		    device_xname(sc->sc_dev),
   8745 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8746 
   8747 		/* Sync the descriptors we're using. */
   8748 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8749 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8750 
   8751 		/* Give the packet to the chip. */
   8752 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8753 		sent = true;
   8754 
   8755 		DPRINTF(sc, WM_DEBUG_TX,
   8756 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8757 
   8758 		DPRINTF(sc, WM_DEBUG_TX,
   8759 		    ("%s: TX: finished transmitting packet, job %d\n",
   8760 			device_xname(sc->sc_dev), txq->txq_snext));
   8761 
   8762 		/* Advance the tx pointer. */
   8763 		txq->txq_free -= txs->txs_ndesc;
   8764 		txq->txq_next = nexttx;
   8765 
   8766 		txq->txq_sfree--;
   8767 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8768 
   8769 		/* Pass the packet to any BPF listeners. */
   8770 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8771 	}
   8772 
   8773 	if (m0 != NULL) {
   8774 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8775 		WM_Q_EVCNT_INCR(txq, descdrop);
   8776 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8777 			__func__));
   8778 		m_freem(m0);
   8779 	}
   8780 
   8781 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8782 		/* No more slots; notify upper layer. */
   8783 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8784 	}
   8785 
   8786 	if (sent) {
   8787 		/* Set a watchdog timer in case the chip flakes out. */
   8788 		txq->txq_lastsent = time_uptime;
   8789 		txq->txq_sending = true;
   8790 	}
   8791 }
   8792 
   8793 static void
   8794 wm_deferred_start_locked(struct wm_txqueue *txq)
   8795 {
   8796 	struct wm_softc *sc = txq->txq_sc;
   8797 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8798 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8799 	int qid = wmq->wmq_id;
   8800 
   8801 	KASSERT(mutex_owned(txq->txq_lock));
   8802 
   8803 	if (txq->txq_stopping) {
   8804 		mutex_exit(txq->txq_lock);
   8805 		return;
   8806 	}
   8807 
   8808 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8809 		/* XXX needed for ALTQ or single-CPU systems */
   8810 		if (qid == 0)
   8811 			wm_nq_start_locked(ifp);
   8812 		wm_nq_transmit_locked(ifp, txq);
   8813 	} else {
    8814 		/* XXX needed for ALTQ or single-CPU systems */
   8815 		if (qid == 0)
   8816 			wm_start_locked(ifp);
   8817 		wm_transmit_locked(ifp, txq);
   8818 	}
   8819 }
   8820 
   8821 /* Interrupt */
   8822 
   8823 /*
   8824  * wm_txeof:
   8825  *
   8826  *	Helper; handle transmit interrupts.
   8827  */
   8828 static bool
   8829 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8830 {
   8831 	struct wm_softc *sc = txq->txq_sc;
   8832 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8833 	struct wm_txsoft *txs;
   8834 	int count = 0;
   8835 	int i;
   8836 	uint8_t status;
   8837 	bool more = false;
   8838 
   8839 	KASSERT(mutex_owned(txq->txq_lock));
   8840 
   8841 	if (txq->txq_stopping)
   8842 		return false;
   8843 
   8844 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8845 
   8846 	/*
   8847 	 * Go through the Tx list and free mbufs for those
   8848 	 * frames which have been transmitted.
   8849 	 */
   8850 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8851 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8852 		if (limit-- == 0) {
   8853 			more = true;
   8854 			DPRINTF(sc, WM_DEBUG_TX,
   8855 			    ("%s: TX: loop limited, job %d is not processed\n",
   8856 				device_xname(sc->sc_dev), i));
   8857 			break;
   8858 		}
   8859 
   8860 		txs = &txq->txq_soft[i];
   8861 
   8862 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8863 			device_xname(sc->sc_dev), i));
   8864 
   8865 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8866 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8867 
   8868 		status =
   8869 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8870 		if ((status & WTX_ST_DD) == 0) {
   8871 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8872 			    BUS_DMASYNC_PREREAD);
   8873 			break;
   8874 		}
   8875 
   8876 		count++;
   8877 		DPRINTF(sc, WM_DEBUG_TX,
   8878 		    ("%s: TX: job %d done: descs %d..%d\n",
   8879 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8880 		    txs->txs_lastdesc));
   8881 
   8882 		/*
   8883 		 * XXX We should probably be using the statistics
   8884 		 * XXX registers, but I don't know if they exist
   8885 		 * XXX on chips before the i82544.
   8886 		 */
   8887 
   8888 #ifdef WM_EVENT_COUNTERS
   8889 		if (status & WTX_ST_TU)
   8890 			WM_Q_EVCNT_INCR(txq, underrun);
   8891 #endif /* WM_EVENT_COUNTERS */
   8892 
   8893 		/*
    8894 		 * Documentation for the 82574 and newer says the status
    8895 		 * field has neither the EC (Excessive Collision) bit nor
    8896 		 * the LC (Late Collision) bit (both reserved). Refer to the
    8897 		 * "PCIe GbE Controller Open Source Software Developer's
    8898 		 * Manual", the 82574 datasheet, and newer.
    8899 		 *
    8900 		 * XXX I saw the LC bit set on an I218 even at full duplex,
    8901 		 * so the bit might have another meaning (I have no document).
   8902 		 */
   8903 
   8904 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8905 		    && ((sc->sc_type < WM_T_82574)
   8906 			|| (sc->sc_type == WM_T_80003))) {
   8907 			if_statinc(ifp, if_oerrors);
   8908 			if (status & WTX_ST_LC)
   8909 				log(LOG_WARNING, "%s: late collision\n",
   8910 				    device_xname(sc->sc_dev));
   8911 			else if (status & WTX_ST_EC) {
   8912 				if_statadd(ifp, if_collisions,
   8913 				    TX_COLLISION_THRESHOLD + 1);
   8914 				log(LOG_WARNING, "%s: excessive collisions\n",
   8915 				    device_xname(sc->sc_dev));
   8916 			}
   8917 		} else
   8918 			if_statinc(ifp, if_opackets);
   8919 
   8920 		txq->txq_packets++;
   8921 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8922 
   8923 		txq->txq_free += txs->txs_ndesc;
   8924 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8925 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8926 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8927 		m_freem(txs->txs_mbuf);
   8928 		txs->txs_mbuf = NULL;
   8929 	}
   8930 
   8931 	/* Update the dirty transmit buffer pointer. */
   8932 	txq->txq_sdirty = i;
   8933 	DPRINTF(sc, WM_DEBUG_TX,
   8934 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8935 
   8936 	if (count != 0)
   8937 		rnd_add_uint32(&sc->rnd_source, count);
   8938 
   8939 	/*
   8940 	 * If there are no more pending transmissions, cancel the watchdog
   8941 	 * timer.
   8942 	 */
   8943 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8944 		txq->txq_sending = false;
   8945 
   8946 	return more;
   8947 }
   8948 
   8949 static inline uint32_t
   8950 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8951 {
   8952 	struct wm_softc *sc = rxq->rxq_sc;
   8953 
   8954 	if (sc->sc_type == WM_T_82574)
   8955 		return EXTRXC_STATUS(
   8956 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8957 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8958 		return NQRXC_STATUS(
   8959 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8960 	else
   8961 		return rxq->rxq_descs[idx].wrx_status;
   8962 }
   8963 
   8964 static inline uint32_t
   8965 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8966 {
   8967 	struct wm_softc *sc = rxq->rxq_sc;
   8968 
   8969 	if (sc->sc_type == WM_T_82574)
   8970 		return EXTRXC_ERROR(
   8971 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8972 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8973 		return NQRXC_ERROR(
   8974 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8975 	else
   8976 		return rxq->rxq_descs[idx].wrx_errors;
   8977 }
   8978 
   8979 static inline uint16_t
   8980 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8981 {
   8982 	struct wm_softc *sc = rxq->rxq_sc;
   8983 
   8984 	if (sc->sc_type == WM_T_82574)
   8985 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8986 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8987 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8988 	else
   8989 		return rxq->rxq_descs[idx].wrx_special;
   8990 }
   8991 
   8992 static inline int
   8993 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8994 {
   8995 	struct wm_softc *sc = rxq->rxq_sc;
   8996 
   8997 	if (sc->sc_type == WM_T_82574)
   8998 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8999 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9000 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9001 	else
   9002 		return rxq->rxq_descs[idx].wrx_len;
   9003 }
   9004 
   9005 #ifdef WM_DEBUG
   9006 static inline uint32_t
   9007 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9008 {
   9009 	struct wm_softc *sc = rxq->rxq_sc;
   9010 
   9011 	if (sc->sc_type == WM_T_82574)
   9012 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9013 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9014 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9015 	else
   9016 		return 0;
   9017 }
   9018 
   9019 static inline uint8_t
   9020 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9021 {
   9022 	struct wm_softc *sc = rxq->rxq_sc;
   9023 
   9024 	if (sc->sc_type == WM_T_82574)
   9025 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9026 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9027 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9028 	else
   9029 		return 0;
   9030 }
   9031 #endif /* WM_DEBUG */
   9032 
   9033 static inline bool
   9034 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9035     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9036 {
   9037 
   9038 	if (sc->sc_type == WM_T_82574)
   9039 		return (status & ext_bit) != 0;
   9040 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9041 		return (status & nq_bit) != 0;
   9042 	else
   9043 		return (status & legacy_bit) != 0;
   9044 }
   9045 
   9046 static inline bool
   9047 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9048     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9049 {
   9050 
   9051 	if (sc->sc_type == WM_T_82574)
   9052 		return (error & ext_bit) != 0;
   9053 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9054 		return (error & nq_bit) != 0;
   9055 	else
   9056 		return (error & legacy_bit) != 0;
   9057 }
   9058 
   9059 static inline bool
   9060 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9061 {
   9062 
   9063 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9064 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9065 		return true;
   9066 	else
   9067 		return false;
   9068 }
   9069 
   9070 static inline bool
   9071 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9072 {
   9073 	struct wm_softc *sc = rxq->rxq_sc;
   9074 
   9075 	/* XXX missing error bit for newqueue? */
   9076 	if (wm_rxdesc_is_set_error(sc, errors,
   9077 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9078 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9079 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9080 		NQRXC_ERROR_RXE)) {
   9081 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9082 		    EXTRXC_ERROR_SE, 0))
   9083 			log(LOG_WARNING, "%s: symbol error\n",
   9084 			    device_xname(sc->sc_dev));
   9085 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9086 		    EXTRXC_ERROR_SEQ, 0))
   9087 			log(LOG_WARNING, "%s: receive sequence error\n",
   9088 			    device_xname(sc->sc_dev));
   9089 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9090 		    EXTRXC_ERROR_CE, 0))
   9091 			log(LOG_WARNING, "%s: CRC error\n",
   9092 			    device_xname(sc->sc_dev));
   9093 		return true;
   9094 	}
   9095 
   9096 	return false;
   9097 }
   9098 
   9099 static inline bool
   9100 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9101 {
   9102 	struct wm_softc *sc = rxq->rxq_sc;
   9103 
   9104 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9105 		NQRXC_STATUS_DD)) {
   9106 		/* We have processed all of the receive descriptors. */
   9107 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9108 		return false;
   9109 	}
   9110 
   9111 	return true;
   9112 }
   9113 
   9114 static inline bool
   9115 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9116     uint16_t vlantag, struct mbuf *m)
   9117 {
   9118 
   9119 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9120 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9121 		vlan_set_tag(m, le16toh(vlantag));
   9122 	}
   9123 
   9124 	return true;
   9125 }
   9126 
   9127 static inline void
   9128 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9129     uint32_t errors, struct mbuf *m)
   9130 {
   9131 	struct wm_softc *sc = rxq->rxq_sc;
   9132 
   9133 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9134 		if (wm_rxdesc_is_set_status(sc, status,
   9135 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9136 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9137 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9138 			if (wm_rxdesc_is_set_error(sc, errors,
   9139 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9140 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9141 		}
   9142 		if (wm_rxdesc_is_set_status(sc, status,
   9143 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9144 			/*
   9145 			 * Note: we don't know if this was TCP or UDP,
   9146 			 * so we just set both bits, and expect the
   9147 			 * upper layers to deal.
   9148 			 */
   9149 			WM_Q_EVCNT_INCR(rxq, tusum);
   9150 			m->m_pkthdr.csum_flags |=
   9151 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9152 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9153 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9154 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9155 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9156 		}
   9157 	}
   9158 }
   9159 
   9160 /*
   9161  * wm_rxeof:
   9162  *
   9163  *	Helper; handle receive interrupts.
   9164  */
   9165 static bool
   9166 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9167 {
   9168 	struct wm_softc *sc = rxq->rxq_sc;
   9169 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9170 	struct wm_rxsoft *rxs;
   9171 	struct mbuf *m;
   9172 	int i, len;
   9173 	int count = 0;
   9174 	uint32_t status, errors;
   9175 	uint16_t vlantag;
   9176 	bool more = false;
   9177 
   9178 	KASSERT(mutex_owned(rxq->rxq_lock));
   9179 
   9180 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9181 		if (limit-- == 0) {
   9182 			more = true;
   9183 			DPRINTF(sc, WM_DEBUG_RX,
   9184 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9185 				device_xname(sc->sc_dev), i));
   9186 			break;
   9187 		}
   9188 
   9189 		rxs = &rxq->rxq_soft[i];
   9190 
   9191 		DPRINTF(sc, WM_DEBUG_RX,
   9192 		    ("%s: RX: checking descriptor %d\n",
   9193 			device_xname(sc->sc_dev), i));
   9194 		wm_cdrxsync(rxq, i,
   9195 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9196 
   9197 		status = wm_rxdesc_get_status(rxq, i);
   9198 		errors = wm_rxdesc_get_errors(rxq, i);
   9199 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9200 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9201 #ifdef WM_DEBUG
   9202 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9203 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9204 #endif
   9205 
   9206 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9207 			break;
   9208 		}
   9209 
   9210 		count++;
   9211 		if (__predict_false(rxq->rxq_discard)) {
   9212 			DPRINTF(sc, WM_DEBUG_RX,
   9213 			    ("%s: RX: discarding contents of descriptor %d\n",
   9214 				device_xname(sc->sc_dev), i));
   9215 			wm_init_rxdesc(rxq, i);
   9216 			if (wm_rxdesc_is_eop(rxq, status)) {
   9217 				/* Reset our state. */
   9218 				DPRINTF(sc, WM_DEBUG_RX,
   9219 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9220 					device_xname(sc->sc_dev)));
   9221 				rxq->rxq_discard = 0;
   9222 			}
   9223 			continue;
   9224 		}
   9225 
   9226 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9227 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9228 
   9229 		m = rxs->rxs_mbuf;
   9230 
   9231 		/*
   9232 		 * Add a new receive buffer to the ring, unless of
   9233 		 * course the length is zero. Treat the latter as a
   9234 		 * failed mapping.
   9235 		 */
   9236 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9237 			/*
   9238 			 * Failed, throw away what we've done so
   9239 			 * far, and discard the rest of the packet.
   9240 			 */
   9241 			if_statinc(ifp, if_ierrors);
   9242 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9243 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9244 			wm_init_rxdesc(rxq, i);
   9245 			if (!wm_rxdesc_is_eop(rxq, status))
   9246 				rxq->rxq_discard = 1;
   9247 			if (rxq->rxq_head != NULL)
   9248 				m_freem(rxq->rxq_head);
   9249 			WM_RXCHAIN_RESET(rxq);
   9250 			DPRINTF(sc, WM_DEBUG_RX,
   9251 			    ("%s: RX: Rx buffer allocation failed, "
   9252 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9253 				rxq->rxq_discard ? " (discard)" : ""));
   9254 			continue;
   9255 		}
   9256 
   9257 		m->m_len = len;
   9258 		rxq->rxq_len += len;
   9259 		DPRINTF(sc, WM_DEBUG_RX,
   9260 		    ("%s: RX: buffer at %p len %d\n",
   9261 			device_xname(sc->sc_dev), m->m_data, len));
   9262 
   9263 		/* If this is not the end of the packet, keep looking. */
   9264 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9265 			WM_RXCHAIN_LINK(rxq, m);
   9266 			DPRINTF(sc, WM_DEBUG_RX,
   9267 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9268 				device_xname(sc->sc_dev), rxq->rxq_len));
   9269 			continue;
   9270 		}
   9271 
   9272 		/*
    9273 		 * Okay, we have the entire packet now. The chip is
    9274 		 * configured to include the FCS (not all chips can be
    9275 		 * configured to strip it), so we need to trim it, except
    9276 		 * on the I35[04] and I21[01]. Those chips have an erratum:
    9277 		 * the RCTL_SECRC bit in the RCTL register is always set,
    9278 		 * so we don't trim the FCS on them. PCH2 and newer chips
    9279 		 * also omit the FCS when jumbo frames are used, to work
    9280 		 * around an erratum. We may need to adjust the length of the
    9281 		 * previous mbuf in the chain if the current mbuf is too short.
   9282 		 */
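         		/*
         		 * Example of the short-mbuf case below: if the final
         		 * mbuf holds only 2 of the 4 FCS bytes, its length
         		 * drops to 0 and the remaining 2 bytes are trimmed
         		 * from the previous mbuf in the chain instead.
         		 */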
   9283 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9284 			if (m->m_len < ETHER_CRC_LEN) {
   9285 				rxq->rxq_tail->m_len
   9286 				    -= (ETHER_CRC_LEN - m->m_len);
   9287 				m->m_len = 0;
   9288 			} else
   9289 				m->m_len -= ETHER_CRC_LEN;
   9290 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9291 		} else
   9292 			len = rxq->rxq_len;
   9293 
   9294 		WM_RXCHAIN_LINK(rxq, m);
   9295 
   9296 		*rxq->rxq_tailp = NULL;
   9297 		m = rxq->rxq_head;
   9298 
   9299 		WM_RXCHAIN_RESET(rxq);
   9300 
   9301 		DPRINTF(sc, WM_DEBUG_RX,
   9302 		    ("%s: RX: have entire packet, len -> %d\n",
   9303 			device_xname(sc->sc_dev), len));
   9304 
   9305 		/* If an error occurred, update stats and drop the packet. */
   9306 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9307 			m_freem(m);
   9308 			continue;
   9309 		}
   9310 
   9311 		/* No errors.  Receive the packet. */
   9312 		m_set_rcvif(m, ifp);
   9313 		m->m_pkthdr.len = len;
   9314 		/*
   9315 		 * TODO
    9316 		 * The rsshash and rsstype should be saved in this mbuf.
   9317 		 */
   9318 		DPRINTF(sc, WM_DEBUG_RX,
   9319 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9320 			device_xname(sc->sc_dev), rsstype, rsshash));
   9321 
   9322 		/*
   9323 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9324 		 * for us.  Associate the tag with the packet.
   9325 		 */
   9326 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9327 			continue;
   9328 
   9329 		/* Set up checksum info for this packet. */
   9330 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9331 
   9332 		rxq->rxq_packets++;
   9333 		rxq->rxq_bytes += len;
   9334 		/* Pass it on. */
   9335 		if_percpuq_enqueue(sc->sc_ipq, m);
   9336 
   9337 		if (rxq->rxq_stopping)
   9338 			break;
   9339 	}
   9340 	rxq->rxq_ptr = i;
   9341 
   9342 	if (count != 0)
   9343 		rnd_add_uint32(&sc->rnd_source, count);
   9344 
   9345 	DPRINTF(sc, WM_DEBUG_RX,
   9346 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9347 
   9348 	return more;
   9349 }
   9350 
   9351 /*
   9352  * wm_linkintr_gmii:
   9353  *
   9354  *	Helper; handle link interrupts for GMII.
   9355  */
   9356 static void
   9357 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9358 {
   9359 	device_t dev = sc->sc_dev;
   9360 	uint32_t status, reg;
   9361 	bool link;
   9362 	int rv;
   9363 
   9364 	KASSERT(WM_CORE_LOCKED(sc));
   9365 
   9366 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9367 		__func__));
   9368 
   9369 	if ((icr & ICR_LSC) == 0) {
   9370 		if (icr & ICR_RXSEQ)
   9371 			DPRINTF(sc, WM_DEBUG_LINK,
   9372 			    ("%s: LINK Receive sequence error\n",
   9373 				device_xname(dev)));
   9374 		return;
   9375 	}
   9376 
   9377 	/* Link status changed */
   9378 	status = CSR_READ(sc, WMREG_STATUS);
   9379 	link = status & STATUS_LU;
   9380 	if (link) {
   9381 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9382 			device_xname(dev),
   9383 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9384 		if (wm_phy_need_linkdown_discard(sc))
   9385 			wm_clear_linkdown_discard(sc);
   9386 	} else {
   9387 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9388 			device_xname(dev)));
   9389 		if (wm_phy_need_linkdown_discard(sc))
   9390 			wm_set_linkdown_discard(sc);
   9391 	}
   9392 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9393 		wm_gig_downshift_workaround_ich8lan(sc);
   9394 
   9395 	if ((sc->sc_type == WM_T_ICH8)
   9396 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9397 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9398 	}
   9399 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9400 		device_xname(dev)));
   9401 	mii_pollstat(&sc->sc_mii);
   9402 	if (sc->sc_type == WM_T_82543) {
   9403 		int miistatus, active;
   9404 
   9405 		/*
   9406 		 * With 82543, we need to force speed and
   9407 		 * duplex on the MAC equal to what the PHY
   9408 		 * speed and duplex configuration is.
   9409 		 */
   9410 		miistatus = sc->sc_mii.mii_media_status;
   9411 
   9412 		if (miistatus & IFM_ACTIVE) {
   9413 			active = sc->sc_mii.mii_media_active;
   9414 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9415 			switch (IFM_SUBTYPE(active)) {
   9416 			case IFM_10_T:
   9417 				sc->sc_ctrl |= CTRL_SPEED_10;
   9418 				break;
   9419 			case IFM_100_TX:
   9420 				sc->sc_ctrl |= CTRL_SPEED_100;
   9421 				break;
   9422 			case IFM_1000_T:
   9423 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9424 				break;
   9425 			default:
   9426 				/*
   9427 				 * Fiber?
   9428 				 * Should not enter here.
   9429 				 */
   9430 				device_printf(dev, "unknown media (%x)\n",
   9431 				    active);
   9432 				break;
   9433 			}
   9434 			if (active & IFM_FDX)
   9435 				sc->sc_ctrl |= CTRL_FD;
   9436 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9437 		}
   9438 	} else if (sc->sc_type == WM_T_PCH) {
   9439 		wm_k1_gig_workaround_hv(sc,
   9440 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9441 	}
   9442 
   9443 	/*
   9444 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9445 	 * aggressive resulting in many collisions. To avoid this, increase
   9446 	 * the IPG and reduce Rx latency in the PHY.
   9447 	 */
   9448 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9449 	    && link) {
   9450 		uint32_t tipg_reg;
   9451 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9452 		bool fdx;
   9453 		uint16_t emi_addr, emi_val;
   9454 
   9455 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9456 		tipg_reg &= ~TIPG_IPGT_MASK;
   9457 		fdx = status & STATUS_FD;
   9458 
   9459 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9460 			tipg_reg |= 0xff;
   9461 			/* Reduce Rx latency in analog PHY */
   9462 			emi_val = 0;
   9463 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9464 		    fdx && speed != STATUS_SPEED_1000) {
   9465 			tipg_reg |= 0xc;
   9466 			emi_val = 1;
   9467 		} else {
   9468 			/* Roll back to the default values */
   9469 			tipg_reg |= 0x08;
   9470 			emi_val = 1;
   9471 		}
   9472 
   9473 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9474 
   9475 		rv = sc->phy.acquire(sc);
   9476 		if (rv)
   9477 			return;
   9478 
   9479 		if (sc->sc_type == WM_T_PCH2)
   9480 			emi_addr = I82579_RX_CONFIG;
   9481 		else
   9482 			emi_addr = I217_RX_CONFIG;
   9483 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9484 
   9485 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9486 			uint16_t phy_reg;
   9487 
   9488 			sc->phy.readreg_locked(dev, 2,
   9489 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9490 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9491 			if (speed == STATUS_SPEED_100
   9492 			    || speed == STATUS_SPEED_10)
   9493 				phy_reg |= 0x3e8;
   9494 			else
   9495 				phy_reg |= 0xfa;
   9496 			sc->phy.writereg_locked(dev, 2,
   9497 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9498 
   9499 			if (speed == STATUS_SPEED_1000) {
   9500 				sc->phy.readreg_locked(dev, 2,
   9501 				    HV_PM_CTRL, &phy_reg);
   9502 
   9503 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9504 
   9505 				sc->phy.writereg_locked(dev, 2,
   9506 				    HV_PM_CTRL, phy_reg);
   9507 			}
   9508 		}
   9509 		sc->phy.release(sc);
   9510 
   9511 		if (rv)
   9512 			return;
   9513 
   9514 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9515 			uint16_t data, ptr_gap;
   9516 
   9517 			if (speed == STATUS_SPEED_1000) {
   9518 				rv = sc->phy.acquire(sc);
   9519 				if (rv)
   9520 					return;
   9521 
   9522 				rv = sc->phy.readreg_locked(dev, 2,
   9523 				    I82579_UNKNOWN1, &data);
   9524 				if (rv) {
   9525 					sc->phy.release(sc);
   9526 					return;
   9527 				}
   9528 
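         				/*
         				 * The pointer gap is a 10-bit field in
         				 * bits 11:2; raise it to at least 0x18.
         				 */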
   9529 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9530 				if (ptr_gap < 0x18) {
   9531 					data &= ~(0x3ff << 2);
   9532 					data |= (0x18 << 2);
   9533 					rv = sc->phy.writereg_locked(dev,
   9534 					    2, I82579_UNKNOWN1, data);
   9535 				}
   9536 				sc->phy.release(sc);
   9537 				if (rv)
   9538 					return;
   9539 			} else {
   9540 				rv = sc->phy.acquire(sc);
   9541 				if (rv)
   9542 					return;
   9543 
   9544 				rv = sc->phy.writereg_locked(dev, 2,
   9545 				    I82579_UNKNOWN1, 0xc023);
   9546 				sc->phy.release(sc);
   9547 				if (rv)
   9548 					return;
   9549 
   9550 			}
   9551 		}
   9552 	}
   9553 
   9554 	/*
   9555 	 * I217 Packet Loss issue:
   9556 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
   9557 	 * on power up.
   9558 	 * Set the Beacon Duration for I217 to 8 usec.
   9559 	 */
   9560 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9561 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9562 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9563 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9564 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9565 	}
   9566 
   9567 	/* Work around the I218 hang issue */
   9568 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9569 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9570 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9571 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9572 		wm_k1_workaround_lpt_lp(sc, link);
   9573 
   9574 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9575 		/*
   9576 		 * Set platform power management values for Latency
   9577 		 * Tolerance Reporting (LTR)
   9578 		 */
   9579 		wm_platform_pm_pch_lpt(sc,
   9580 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9581 	}
   9582 
   9583 	/* Clear link partner's EEE ability */
   9584 	sc->eee_lp_ability = 0;
   9585 
   9586 	/* FEXTNVM6 K1-off workaround */
   9587 	if (sc->sc_type == WM_T_PCH_SPT) {
   9588 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9589 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9590 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9591 		else
   9592 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9593 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9594 	}
   9595 
   9596 	if (!link)
   9597 		return;
   9598 
   9599 	switch (sc->sc_type) {
   9600 	case WM_T_PCH2:
   9601 		wm_k1_workaround_lv(sc);
   9602 		/* FALLTHROUGH */
   9603 	case WM_T_PCH:
   9604 		if (sc->sc_phytype == WMPHY_82578)
   9605 			wm_link_stall_workaround_hv(sc);
   9606 		break;
   9607 	default:
   9608 		break;
   9609 	}
   9610 
   9611 	/* Enable/Disable EEE after link up */
   9612 	if (sc->sc_phytype > WMPHY_82579)
   9613 		wm_set_eee_pchlan(sc);
   9614 }
   9615 
   9616 /*
   9617  * wm_linkintr_tbi:
   9618  *
   9619  *	Helper; handle link interrupts for TBI mode.
   9620  */
   9621 static void
   9622 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9623 {
   9624 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9625 	uint32_t status;
   9626 
   9627 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9628 		__func__));
   9629 
   9630 	status = CSR_READ(sc, WMREG_STATUS);
   9631 	if (icr & ICR_LSC) {
   9632 		wm_check_for_link(sc);
   9633 		if (status & STATUS_LU) {
   9634 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9635 				device_xname(sc->sc_dev),
   9636 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9637 			/*
   9638 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9639 			 * so we should update sc->sc_ctrl
   9640 			 */
   9641 
   9642 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9643 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9644 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9645 			if (status & STATUS_FD)
   9646 				sc->sc_tctl |=
   9647 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9648 			else
   9649 				sc->sc_tctl |=
   9650 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9651 			if (sc->sc_ctrl & CTRL_TFCE)
   9652 				sc->sc_fcrtl |= FCRTL_XONE;
   9653 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9654 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9655 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9656 			sc->sc_tbi_linkup = 1;
   9657 			if_link_state_change(ifp, LINK_STATE_UP);
   9658 		} else {
   9659 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9660 				device_xname(sc->sc_dev)));
   9661 			sc->sc_tbi_linkup = 0;
   9662 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9663 		}
   9664 		/* Update LED */
   9665 		wm_tbi_serdes_set_linkled(sc);
   9666 	} else if (icr & ICR_RXSEQ)
   9667 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9668 			device_xname(sc->sc_dev)));
   9669 }
   9670 
   9671 /*
   9672  * wm_linkintr_serdes:
   9673  *
   9674  *	Helper; handle link interrupts for SERDES mode.
   9675  */
   9676 static void
   9677 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9678 {
   9679 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9680 	struct mii_data *mii = &sc->sc_mii;
   9681 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9682 	uint32_t pcs_adv, pcs_lpab, reg;
   9683 
   9684 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9685 		__func__));
   9686 
   9687 	if (icr & ICR_LSC) {
   9688 		/* Check PCS */
   9689 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9690 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9691 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9692 				device_xname(sc->sc_dev)));
   9693 			mii->mii_media_status |= IFM_ACTIVE;
   9694 			sc->sc_tbi_linkup = 1;
   9695 			if_link_state_change(ifp, LINK_STATE_UP);
   9696 		} else {
   9697 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9698 				device_xname(sc->sc_dev)));
   9699 			mii->mii_media_status |= IFM_NONE;
   9700 			sc->sc_tbi_linkup = 0;
   9701 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9702 			wm_tbi_serdes_set_linkled(sc);
   9703 			return;
   9704 		}
   9705 		mii->mii_media_active |= IFM_1000_SX;
   9706 		if ((reg & PCS_LSTS_FDX) != 0)
   9707 			mii->mii_media_active |= IFM_FDX;
   9708 		else
   9709 			mii->mii_media_active |= IFM_HDX;
   9710 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9711 			/* Check flow */
   9712 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9713 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9714 				DPRINTF(sc, WM_DEBUG_LINK,
   9715 				    ("XXX LINKOK but not ACOMP\n"));
   9716 				return;
   9717 			}
   9718 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9719 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9720 			DPRINTF(sc, WM_DEBUG_LINK,
   9721 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9722 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9723 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9724 				mii->mii_media_active |= IFM_FLOW
   9725 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9726 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9727 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9728 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9729 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9730 				mii->mii_media_active |= IFM_FLOW
   9731 				    | IFM_ETH_TXPAUSE;
   9732 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9733 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9734 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9735 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9736 				mii->mii_media_active |= IFM_FLOW
   9737 				    | IFM_ETH_RXPAUSE;
   9738 		}
   9739 		/* Update LED */
   9740 		wm_tbi_serdes_set_linkled(sc);
   9741 	} else
   9742 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9743 		    device_xname(sc->sc_dev)));
   9744 }
   9745 
   9746 /*
   9747  * wm_linkintr:
   9748  *
   9749  *	Helper; handle link interrupts.
   9750  */
   9751 static void
   9752 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9753 {
   9754 
   9755 	KASSERT(WM_CORE_LOCKED(sc));
   9756 
   9757 	if (sc->sc_flags & WM_F_HAS_MII)
   9758 		wm_linkintr_gmii(sc, icr);
   9759 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9760 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9761 		wm_linkintr_serdes(sc, icr);
   9762 	else
   9763 		wm_linkintr_tbi(sc, icr);
   9764 }
   9765 
   9766 
   9767 static inline void
   9768 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9769 {
   9770 
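         	/* Defer the remaining Tx/Rx work to a workqueue or a softint. */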
   9771 	if (wmq->wmq_txrx_use_workqueue)
   9772 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9773 	else
   9774 		softint_schedule(wmq->wmq_si);
   9775 }
   9776 
   9777 /*
   9778  * wm_intr_legacy:
   9779  *
   9780  *	Interrupt service routine for INTx and MSI.
   9781  */
   9782 static int
   9783 wm_intr_legacy(void *arg)
   9784 {
   9785 	struct wm_softc *sc = arg;
   9786 	struct wm_queue *wmq = &sc->sc_queue[0];
   9787 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9788 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9789 	uint32_t icr, rndval = 0;
   9790 	int handled = 0;
   9791 
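         	/* Loop while the ICR reports enabled interrupt causes. */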
   9792 	while (1 /* CONSTCOND */) {
   9793 		icr = CSR_READ(sc, WMREG_ICR);
   9794 		if ((icr & sc->sc_icr) == 0)
   9795 			break;
   9796 		if (handled == 0)
   9797 			DPRINTF(sc, WM_DEBUG_TX,
   9798 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9799 		if (rndval == 0)
   9800 			rndval = icr;
   9801 
   9802 		mutex_enter(rxq->rxq_lock);
   9803 
   9804 		if (rxq->rxq_stopping) {
   9805 			mutex_exit(rxq->rxq_lock);
   9806 			break;
   9807 		}
   9808 
   9809 		handled = 1;
   9810 
   9811 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9812 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9813 			DPRINTF(sc, WM_DEBUG_RX,
   9814 			    ("%s: RX: got Rx intr 0x%08x\n",
   9815 				device_xname(sc->sc_dev),
   9816 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9817 			WM_Q_EVCNT_INCR(rxq, intr);
   9818 		}
   9819 #endif
   9820 		/*
   9821 		 * wm_rxeof() does *not* call upper layer functions directly,
   9822 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9823 		 * So, we can call wm_rxeof() in interrupt context.
   9824 		 */
   9825 		wm_rxeof(rxq, UINT_MAX);
   9826 
   9827 		mutex_exit(rxq->rxq_lock);
   9828 		mutex_enter(txq->txq_lock);
   9829 
   9830 		if (txq->txq_stopping) {
   9831 			mutex_exit(txq->txq_lock);
   9832 			break;
   9833 		}
   9834 
   9835 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9836 		if (icr & ICR_TXDW) {
   9837 			DPRINTF(sc, WM_DEBUG_TX,
   9838 			    ("%s: TX: got TXDW interrupt\n",
   9839 				device_xname(sc->sc_dev)));
   9840 			WM_Q_EVCNT_INCR(txq, txdw);
   9841 		}
   9842 #endif
   9843 		wm_txeof(txq, UINT_MAX);
   9844 
   9845 		mutex_exit(txq->txq_lock);
   9846 		WM_CORE_LOCK(sc);
   9847 
   9848 		if (sc->sc_core_stopping) {
   9849 			WM_CORE_UNLOCK(sc);
   9850 			break;
   9851 		}
   9852 
   9853 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9854 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9855 			wm_linkintr(sc, icr);
   9856 		}
   9857 		if ((icr & ICR_GPI(0)) != 0)
   9858 			device_printf(sc->sc_dev, "got module interrupt\n");
   9859 
   9860 		WM_CORE_UNLOCK(sc);
   9861 
   9862 		if (icr & ICR_RXO) {
   9863 #if defined(WM_DEBUG)
   9864 			log(LOG_WARNING, "%s: Receive overrun\n",
   9865 			    device_xname(sc->sc_dev));
   9866 #endif /* defined(WM_DEBUG) */
   9867 		}
   9868 	}
   9869 
   9870 	rnd_add_uint32(&sc->rnd_source, rndval);
   9871 
   9872 	if (handled) {
   9873 		/* Try to get more packets going. */
   9874 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9875 		wm_sched_handle_queue(sc, wmq);
   9876 	}
   9877 
   9878 	return handled;
   9879 }
   9880 
   9881 static inline void
   9882 wm_txrxintr_disable(struct wm_queue *wmq)
   9883 {
   9884 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9885 
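         	/*
         	 * Mask this queue's Tx/Rx interrupt sources; the register
         	 * used differs by MAC type.
         	 */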
   9886 	if (sc->sc_type == WM_T_82574)
   9887 		CSR_WRITE(sc, WMREG_IMC,
   9888 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9889 	else if (sc->sc_type == WM_T_82575)
   9890 		CSR_WRITE(sc, WMREG_EIMC,
   9891 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9892 	else
   9893 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9894 }
   9895 
   9896 static inline void
   9897 wm_txrxintr_enable(struct wm_queue *wmq)
   9898 {
   9899 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9900 
   9901 	wm_itrs_calculate(sc, wmq);
   9902 
   9903 	/*
   9904 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
   9905 	 * There is no need to care whether RXQ(0) or RXQ(1) enables
   9906 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
   9907 	 * while each wm_handle_queue(wmq) is running.
   9908 	 */
   9909 	if (sc->sc_type == WM_T_82574)
   9910 		CSR_WRITE(sc, WMREG_IMS,
   9911 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9912 	else if (sc->sc_type == WM_T_82575)
   9913 		CSR_WRITE(sc, WMREG_EIMS,
   9914 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9915 	else
   9916 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9917 }
   9918 
   9919 static int
   9920 wm_txrxintr_msix(void *arg)
   9921 {
   9922 	struct wm_queue *wmq = arg;
   9923 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9924 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9925 	struct wm_softc *sc = txq->txq_sc;
   9926 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9927 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9928 	bool txmore;
   9929 	bool rxmore;
   9930 
   9931 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9932 
   9933 	DPRINTF(sc, WM_DEBUG_TX,
   9934 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9935 
   9936 	wm_txrxintr_disable(wmq);
   9937 
   9938 	mutex_enter(txq->txq_lock);
   9939 
   9940 	if (txq->txq_stopping) {
   9941 		mutex_exit(txq->txq_lock);
   9942 		return 0;
   9943 	}
   9944 
   9945 	WM_Q_EVCNT_INCR(txq, txdw);
   9946 	txmore = wm_txeof(txq, txlimit);
   9947 	/* wm_deferred start() is done in wm_handle_queue(). */
   9948 	mutex_exit(txq->txq_lock);
   9949 
   9950 	DPRINTF(sc, WM_DEBUG_RX,
   9951 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9952 	mutex_enter(rxq->rxq_lock);
   9953 
   9954 	if (rxq->rxq_stopping) {
   9955 		mutex_exit(rxq->rxq_lock);
   9956 		return 0;
   9957 	}
   9958 
   9959 	WM_Q_EVCNT_INCR(rxq, intr);
   9960 	rxmore = wm_rxeof(rxq, rxlimit);
   9961 	mutex_exit(rxq->rxq_lock);
   9962 
   9963 	wm_itrs_writereg(sc, wmq);
   9964 
   9965 	if (txmore || rxmore) {
   9966 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9967 		wm_sched_handle_queue(sc, wmq);
   9968 	} else
   9969 		wm_txrxintr_enable(wmq);
   9970 
   9971 	return 1;
   9972 }
   9973 
   9974 static void
   9975 wm_handle_queue(void *arg)
   9976 {
   9977 	struct wm_queue *wmq = arg;
   9978 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9979 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9980 	struct wm_softc *sc = txq->txq_sc;
   9981 	u_int txlimit = sc->sc_tx_process_limit;
   9982 	u_int rxlimit = sc->sc_rx_process_limit;
   9983 	bool txmore;
   9984 	bool rxmore;
   9985 
   9986 	mutex_enter(txq->txq_lock);
   9987 	if (txq->txq_stopping) {
   9988 		mutex_exit(txq->txq_lock);
   9989 		return;
   9990 	}
   9991 	txmore = wm_txeof(txq, txlimit);
   9992 	wm_deferred_start_locked(txq);
   9993 	mutex_exit(txq->txq_lock);
   9994 
   9995 	mutex_enter(rxq->rxq_lock);
   9996 	if (rxq->rxq_stopping) {
   9997 		mutex_exit(rxq->rxq_lock);
   9998 		return;
   9999 	}
   10000 	WM_Q_EVCNT_INCR(rxq, defer);
   10001 	rxmore = wm_rxeof(rxq, rxlimit);
   10002 	mutex_exit(rxq->rxq_lock);
   10003 
   10004 	if (txmore || rxmore) {
   10005 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10006 		wm_sched_handle_queue(sc, wmq);
   10007 	} else
   10008 		wm_txrxintr_enable(wmq);
   10009 }
   10010 
   10011 static void
   10012 wm_handle_queue_work(struct work *wk, void *context)
   10013 {
   10014 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10015 
   10016 	/*
   10017 	 * "enqueued flag" is not required here.
   10018 	 */
   10019 	wm_handle_queue(wmq);
   10020 }
   10021 
   10022 /*
   10023  * wm_linkintr_msix:
   10024  *
   10025  *	Interrupt service routine for link status change for MSI-X.
   10026  */
   10027 static int
   10028 wm_linkintr_msix(void *arg)
   10029 {
   10030 	struct wm_softc *sc = arg;
   10031 	uint32_t reg;
   10032 	bool has_rxo;
   10033 
   10034 	reg = CSR_READ(sc, WMREG_ICR);
   10035 	WM_CORE_LOCK(sc);
   10036 	DPRINTF(sc, WM_DEBUG_LINK,
   10037 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10038 		device_xname(sc->sc_dev), reg));
   10039 
   10040 	if (sc->sc_core_stopping)
   10041 		goto out;
   10042 
   10043 	if ((reg & ICR_LSC) != 0) {
   10044 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10045 		wm_linkintr(sc, ICR_LSC);
   10046 	}
   10047 	if ((reg & ICR_GPI(0)) != 0)
   10048 		device_printf(sc->sc_dev, "got module interrupt\n");
   10049 
   10050 	/*
   10051 	 * XXX 82574 MSI-X mode workaround
   10052 	 *
   10053 	 * The 82574 in MSI-X mode raises the receive overrun (RXO) interrupt
   10054 	 * on the ICR_OTHER MSI-X vector, and raises neither the ICR_RXQ(0)
   10055 	 * nor the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and
   10056 	 * ICR_RXQ(1) interrupts by writing WMREG_ICS to process received packets.
   10057 	 */
   10058 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10059 #if defined(WM_DEBUG)
   10060 		log(LOG_WARNING, "%s: Receive overrun\n",
   10061 		    device_xname(sc->sc_dev));
   10062 #endif /* defined(WM_DEBUG) */
   10063 
   10064 		has_rxo = true;
   10065 		/*
   10066 		 * The RXO interrupt rate is very high when the receive
   10067 		 * traffic rate is high. We use polling mode for ICR_OTHER,
   10068 		 * as for the Tx/Rx interrupts. ICR_OTHER will be re-enabled
   10069 		 * at the end of wm_txrxintr_msix(), which is kicked by both
   10070 		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   10071 		 */
   10072 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10073 
   10074 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10075 	}
   10076 
   10079 out:
   10080 	WM_CORE_UNLOCK(sc);
   10081 
   10082 	if (sc->sc_type == WM_T_82574) {
   10083 		if (!has_rxo)
   10084 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10085 		else
   10086 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10087 	} else if (sc->sc_type == WM_T_82575)
   10088 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10089 	else
   10090 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10091 
   10092 	return 1;
   10093 }
   10094 
   10095 /*
   10096  * Media related.
   10097  * GMII, SGMII, TBI (and SERDES)
   10098  */
   10099 
   10100 /* Common */
   10101 
   10102 /*
   10103  * wm_tbi_serdes_set_linkled:
   10104  *
   10105  *	Update the link LED on TBI and SERDES devices.
   10106  */
   10107 static void
   10108 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10109 {
   10110 
   10111 	if (sc->sc_tbi_linkup)
   10112 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10113 	else
   10114 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10115 
   10116 	/* 82540 or newer devices are active low */
   10117 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10118 
   10119 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10120 }
   10121 
   10122 /* GMII related */
   10123 
   10124 /*
   10125  * wm_gmii_reset:
   10126  *
   10127  *	Reset the PHY.
   10128  */
   10129 static void
   10130 wm_gmii_reset(struct wm_softc *sc)
   10131 {
   10132 	uint32_t reg;
   10133 	int rv;
   10134 
   10135 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10136 		device_xname(sc->sc_dev), __func__));
   10137 
   10138 	rv = sc->phy.acquire(sc);
   10139 	if (rv != 0) {
   10140 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10141 		    __func__);
   10142 		return;
   10143 	}
   10144 
   10145 	switch (sc->sc_type) {
   10146 	case WM_T_82542_2_0:
   10147 	case WM_T_82542_2_1:
   10148 		/* null */
   10149 		break;
   10150 	case WM_T_82543:
   10151 		/*
   10152 		 * With 82543, we need to force speed and duplex on the MAC
   10153 		 * equal to what the PHY speed and duplex configuration is.
   10154 		 * In addition, we need to perform a hardware reset on the PHY
   10155 		 * to take it out of reset.
   10156 		 */
   10157 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10158 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10159 
   10160 		/* The PHY reset pin is active-low. */
   10161 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10162 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10163 		    CTRL_EXT_SWDPIN(4));
   10164 		reg |= CTRL_EXT_SWDPIO(4);
   10165 
   10166 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10167 		CSR_WRITE_FLUSH(sc);
   10168 		delay(10*1000);
   10169 
   10170 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10171 		CSR_WRITE_FLUSH(sc);
   10172 		delay(150);
   10173 #if 0
   10174 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10175 #endif
   10176 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10177 		break;
   10178 	case WM_T_82544:	/* Reset 10000us */
   10179 	case WM_T_82540:
   10180 	case WM_T_82545:
   10181 	case WM_T_82545_3:
   10182 	case WM_T_82546:
   10183 	case WM_T_82546_3:
   10184 	case WM_T_82541:
   10185 	case WM_T_82541_2:
   10186 	case WM_T_82547:
   10187 	case WM_T_82547_2:
   10188 	case WM_T_82571:	/* Reset 100us */
   10189 	case WM_T_82572:
   10190 	case WM_T_82573:
   10191 	case WM_T_82574:
   10192 	case WM_T_82575:
   10193 	case WM_T_82576:
   10194 	case WM_T_82580:
   10195 	case WM_T_I350:
   10196 	case WM_T_I354:
   10197 	case WM_T_I210:
   10198 	case WM_T_I211:
   10199 	case WM_T_82583:
   10200 	case WM_T_80003:
   10201 		/* Generic reset */
   10202 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10203 		CSR_WRITE_FLUSH(sc);
   10204 		delay(20000);
   10205 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10206 		CSR_WRITE_FLUSH(sc);
   10207 		delay(20000);
   10208 
   10209 		if ((sc->sc_type == WM_T_82541)
   10210 		    || (sc->sc_type == WM_T_82541_2)
   10211 		    || (sc->sc_type == WM_T_82547)
   10212 		    || (sc->sc_type == WM_T_82547_2)) {
   10213 			/* Workarounds for IGP are done in igp_reset() */
   10214 			/* XXX add code to set LED after phy reset */
   10215 		}
   10216 		break;
   10217 	case WM_T_ICH8:
   10218 	case WM_T_ICH9:
   10219 	case WM_T_ICH10:
   10220 	case WM_T_PCH:
   10221 	case WM_T_PCH2:
   10222 	case WM_T_PCH_LPT:
   10223 	case WM_T_PCH_SPT:
   10224 	case WM_T_PCH_CNP:
   10225 		/* Generic reset */
   10226 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10227 		CSR_WRITE_FLUSH(sc);
   10228 		delay(100);
   10229 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10230 		CSR_WRITE_FLUSH(sc);
   10231 		delay(150);
   10232 		break;
   10233 	default:
   10234 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10235 		    __func__);
   10236 		break;
   10237 	}
   10238 
   10239 	sc->phy.release(sc);
   10240 
   10241 	/* get_cfg_done */
   10242 	wm_get_cfg_done(sc);
   10243 
   10244 	/* Extra setup */
   10245 	switch (sc->sc_type) {
   10246 	case WM_T_82542_2_0:
   10247 	case WM_T_82542_2_1:
   10248 	case WM_T_82543:
   10249 	case WM_T_82544:
   10250 	case WM_T_82540:
   10251 	case WM_T_82545:
   10252 	case WM_T_82545_3:
   10253 	case WM_T_82546:
   10254 	case WM_T_82546_3:
   10255 	case WM_T_82541_2:
   10256 	case WM_T_82547_2:
   10257 	case WM_T_82571:
   10258 	case WM_T_82572:
   10259 	case WM_T_82573:
   10260 	case WM_T_82574:
   10261 	case WM_T_82583:
   10262 	case WM_T_82575:
   10263 	case WM_T_82576:
   10264 	case WM_T_82580:
   10265 	case WM_T_I350:
   10266 	case WM_T_I354:
   10267 	case WM_T_I210:
   10268 	case WM_T_I211:
   10269 	case WM_T_80003:
   10270 		/* Null */
   10271 		break;
   10272 	case WM_T_82541:
   10273 	case WM_T_82547:
   10274 		/* XXX Configure the LED after PHY reset */
   10275 		break;
   10276 	case WM_T_ICH8:
   10277 	case WM_T_ICH9:
   10278 	case WM_T_ICH10:
   10279 	case WM_T_PCH:
   10280 	case WM_T_PCH2:
   10281 	case WM_T_PCH_LPT:
   10282 	case WM_T_PCH_SPT:
   10283 	case WM_T_PCH_CNP:
   10284 		wm_phy_post_reset(sc);
   10285 		break;
   10286 	default:
   10287 		panic("%s: unknown type\n", __func__);
   10288 		break;
   10289 	}
   10290 }
   10291 
   10292 /*
   10293  * Set up sc_phytype and mii_{read|write}reg.
   10294  *
   10295  *  To identify the PHY type, the correct read/write functions must be
   10296  * selected. Selecting them requires the PCI ID or the MAC type, without
   10297  * accessing PHY registers.
   10298  *
   10299  *  On the first call of this function, the PHY ID is not known yet.
   10300  * Check the PCI ID or the MAC type. The list of PCI IDs may not be
   10301  * perfect, so the result might be incorrect.
   10302  *
   10303  *  On the second call, the PHY OUI and model are used to identify the
   10304  * PHY type. It might not be perfect because of missing entries in the
   10305  * comparison table, but it should be better than the first call.
   10306  *
   10307  *  If the newly detected result differs from the previous assumption,
   10308  * a diagnostic message is printed.
   10309  */
   10310 static void
   10311 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10312     uint16_t phy_model)
   10313 {
   10314 	device_t dev = sc->sc_dev;
   10315 	struct mii_data *mii = &sc->sc_mii;
   10316 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10317 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10318 	mii_readreg_t new_readreg;
   10319 	mii_writereg_t new_writereg;
   10320 	bool dodiag = true;
   10321 
   10322 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10323 		device_xname(sc->sc_dev), __func__));
   10324 
   10325 	/*
   10326 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is
   10327 	 * always incorrect, so don't print diag output on the second call.
   10328 	 */
   10329 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10330 		dodiag = false;
   10331 
   10332 	if (mii->mii_readreg == NULL) {
   10333 		/*
   10334 		 *  This is the first call of this function. For ICH and PCH
   10335 		 * variants, it's difficult to determine the PHY access method
   10336 		 * by sc_type, so use the PCI product ID for some devices.
   10337 		 */
   10338 
   10339 		switch (sc->sc_pcidevid) {
   10340 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10341 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10342 			/* 82577 */
   10343 			new_phytype = WMPHY_82577;
   10344 			break;
   10345 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10346 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10347 			/* 82578 */
   10348 			new_phytype = WMPHY_82578;
   10349 			break;
   10350 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10351 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10352 			/* 82579 */
   10353 			new_phytype = WMPHY_82579;
   10354 			break;
   10355 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10356 		case PCI_PRODUCT_INTEL_82801I_BM:
   10357 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10358 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10359 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10360 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10361 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10362 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10363 			/* ICH8, 9, 10 with 82567 */
   10364 			new_phytype = WMPHY_BM;
   10365 			break;
   10366 		default:
   10367 			break;
   10368 		}
   10369 	} else {
   10370 		/* It's not the first call. Use PHY OUI and model */
   10371 		switch (phy_oui) {
   10372 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10373 			switch (phy_model) {
   10374 			case 0x0004: /* XXX */
   10375 				new_phytype = WMPHY_82578;
   10376 				break;
   10377 			default:
   10378 				break;
   10379 			}
   10380 			break;
   10381 		case MII_OUI_xxMARVELL:
   10382 			switch (phy_model) {
   10383 			case MII_MODEL_xxMARVELL_I210:
   10384 				new_phytype = WMPHY_I210;
   10385 				break;
   10386 			case MII_MODEL_xxMARVELL_E1011:
   10387 			case MII_MODEL_xxMARVELL_E1000_3:
   10388 			case MII_MODEL_xxMARVELL_E1000_5:
   10389 			case MII_MODEL_xxMARVELL_E1112:
   10390 				new_phytype = WMPHY_M88;
   10391 				break;
   10392 			case MII_MODEL_xxMARVELL_E1149:
   10393 				new_phytype = WMPHY_BM;
   10394 				break;
   10395 			case MII_MODEL_xxMARVELL_E1111:
   10396 			case MII_MODEL_xxMARVELL_I347:
   10397 			case MII_MODEL_xxMARVELL_E1512:
   10398 			case MII_MODEL_xxMARVELL_E1340M:
   10399 			case MII_MODEL_xxMARVELL_E1543:
   10400 				new_phytype = WMPHY_M88;
   10401 				break;
   10402 			case MII_MODEL_xxMARVELL_I82563:
   10403 				new_phytype = WMPHY_GG82563;
   10404 				break;
   10405 			default:
   10406 				break;
   10407 			}
   10408 			break;
   10409 		case MII_OUI_INTEL:
   10410 			switch (phy_model) {
   10411 			case MII_MODEL_INTEL_I82577:
   10412 				new_phytype = WMPHY_82577;
   10413 				break;
   10414 			case MII_MODEL_INTEL_I82579:
   10415 				new_phytype = WMPHY_82579;
   10416 				break;
   10417 			case MII_MODEL_INTEL_I217:
   10418 				new_phytype = WMPHY_I217;
   10419 				break;
   10420 			case MII_MODEL_INTEL_I82580:
   10421 				new_phytype = WMPHY_82580;
   10422 				break;
   10423 			case MII_MODEL_INTEL_I350:
   10424 				new_phytype = WMPHY_I350;
   10425 				break;
   10427 			default:
   10428 				break;
   10429 			}
   10430 			break;
   10431 		case MII_OUI_yyINTEL:
   10432 			switch (phy_model) {
   10433 			case MII_MODEL_yyINTEL_I82562G:
   10434 			case MII_MODEL_yyINTEL_I82562EM:
   10435 			case MII_MODEL_yyINTEL_I82562ET:
   10436 				new_phytype = WMPHY_IFE;
   10437 				break;
   10438 			case MII_MODEL_yyINTEL_IGP01E1000:
   10439 				new_phytype = WMPHY_IGP;
   10440 				break;
   10441 			case MII_MODEL_yyINTEL_I82566:
   10442 				new_phytype = WMPHY_IGP_3;
   10443 				break;
   10444 			default:
   10445 				break;
   10446 			}
   10447 			break;
   10448 		default:
   10449 			break;
   10450 		}
   10451 
   10452 		if (dodiag) {
   10453 			if (new_phytype == WMPHY_UNKNOWN)
   10454 				aprint_verbose_dev(dev,
   10455 				    "%s: Unknown PHY model. OUI=%06x, "
   10456 				    "model=%04x\n", __func__, phy_oui,
   10457 				    phy_model);
   10458 
   10459 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10460 			    && (sc->sc_phytype != new_phytype)) {
   10461 				aprint_error_dev(dev, "Previously assumed PHY "
   10462 				    "type(%u) was incorrect. PHY type from PHY "
   10463 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10464 			}
   10465 		}
   10466 	}
   10467 
   10468 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10469 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10470 		/* SGMII */
   10471 		new_readreg = wm_sgmii_readreg;
   10472 		new_writereg = wm_sgmii_writereg;
   10473 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10474 		/* BM2 (phyaddr == 1) */
   10475 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10476 		    && (new_phytype != WMPHY_BM)
   10477 		    && (new_phytype != WMPHY_UNKNOWN))
   10478 			doubt_phytype = new_phytype;
   10479 		new_phytype = WMPHY_BM;
   10480 		new_readreg = wm_gmii_bm_readreg;
   10481 		new_writereg = wm_gmii_bm_writereg;
   10482 	} else if (sc->sc_type >= WM_T_PCH) {
   10483 		/* All PCH* use _hv_ */
   10484 		new_readreg = wm_gmii_hv_readreg;
   10485 		new_writereg = wm_gmii_hv_writereg;
   10486 	} else if (sc->sc_type >= WM_T_ICH8) {
   10487 		/* non-82567 ICH8, 9 and 10 */
   10488 		new_readreg = wm_gmii_i82544_readreg;
   10489 		new_writereg = wm_gmii_i82544_writereg;
   10490 	} else if (sc->sc_type >= WM_T_80003) {
   10491 		/* 80003 */
   10492 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10493 		    && (new_phytype != WMPHY_GG82563)
   10494 		    && (new_phytype != WMPHY_UNKNOWN))
   10495 			doubt_phytype = new_phytype;
   10496 		new_phytype = WMPHY_GG82563;
   10497 		new_readreg = wm_gmii_i80003_readreg;
   10498 		new_writereg = wm_gmii_i80003_writereg;
   10499 	} else if (sc->sc_type >= WM_T_I210) {
   10500 		/* I210 and I211 */
   10501 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10502 		    && (new_phytype != WMPHY_I210)
   10503 		    && (new_phytype != WMPHY_UNKNOWN))
   10504 			doubt_phytype = new_phytype;
   10505 		new_phytype = WMPHY_I210;
   10506 		new_readreg = wm_gmii_gs40g_readreg;
   10507 		new_writereg = wm_gmii_gs40g_writereg;
   10508 	} else if (sc->sc_type >= WM_T_82580) {
   10509 		/* 82580, I350 and I354 */
   10510 		new_readreg = wm_gmii_82580_readreg;
   10511 		new_writereg = wm_gmii_82580_writereg;
   10512 	} else if (sc->sc_type >= WM_T_82544) {
   10513 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10514 		new_readreg = wm_gmii_i82544_readreg;
   10515 		new_writereg = wm_gmii_i82544_writereg;
   10516 	} else {
   10517 		new_readreg = wm_gmii_i82543_readreg;
   10518 		new_writereg = wm_gmii_i82543_writereg;
   10519 	}
   10520 
   10521 	if (new_phytype == WMPHY_BM) {
   10522 		/* All BM use _bm_ */
   10523 		new_readreg = wm_gmii_bm_readreg;
   10524 		new_writereg = wm_gmii_bm_writereg;
   10525 	}
   10526 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10527 		/* All PCH* use _hv_ */
   10528 		new_readreg = wm_gmii_hv_readreg;
   10529 		new_writereg = wm_gmii_hv_writereg;
   10530 	}
   10531 
   10532 	/* Diag output */
   10533 	if (dodiag) {
   10534 		if (doubt_phytype != WMPHY_UNKNOWN)
   10535 			aprint_error_dev(dev, "Assumed new PHY type was "
   10536 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10537 			    new_phytype);
   10538 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10539 		    && (sc->sc_phytype != new_phytype))
   10540 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   10541 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
   10542 			    sc->sc_phytype, new_phytype);
   10543 
   10544 		if ((mii->mii_readreg != NULL) &&
   10545 		    (new_phytype == WMPHY_UNKNOWN))
   10546 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10547 
   10548 		if ((mii->mii_readreg != NULL) &&
   10549 		    (mii->mii_readreg != new_readreg))
   10550 			aprint_error_dev(dev, "Previously assumed PHY "
   10551 			    "read/write function was incorrect.\n");
   10552 	}
   10553 
   10554 	/* Update now */
   10555 	sc->sc_phytype = new_phytype;
   10556 	mii->mii_readreg = new_readreg;
   10557 	mii->mii_writereg = new_writereg;
   10558 	if (new_readreg == wm_gmii_hv_readreg) {
   10559 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10560 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10561 	} else if (new_readreg == wm_sgmii_readreg) {
   10562 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10563 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10564 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10565 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10566 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10567 	}
   10568 }
   10569 
   10570 /*
   10571  * wm_get_phy_id_82575:
   10572  *
   10573  * Return the PHY ID, or -1 if it failed.
   10574  */
   10575 static int
   10576 wm_get_phy_id_82575(struct wm_softc *sc)
   10577 {
   10578 	uint32_t reg;
   10579 	int phyid = -1;
   10580 
   10581 	/* XXX */
   10582 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10583 		return -1;
   10584 
   10585 	if (wm_sgmii_uses_mdio(sc)) {
   10586 		switch (sc->sc_type) {
   10587 		case WM_T_82575:
   10588 		case WM_T_82576:
   10589 			reg = CSR_READ(sc, WMREG_MDIC);
   10590 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10591 			break;
   10592 		case WM_T_82580:
   10593 		case WM_T_I350:
   10594 		case WM_T_I354:
   10595 		case WM_T_I210:
   10596 		case WM_T_I211:
   10597 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10598 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10599 			break;
   10600 		default:
   10601 			return -1;
   10602 		}
   10603 	}
   10604 
   10605 	return phyid;
   10606 }
   10607 
   10608 /*
   10609  * wm_gmii_mediainit:
   10610  *
   10611  *	Initialize media for use on 1000BASE-T devices.
   10612  */
   10613 static void
   10614 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10615 {
   10616 	device_t dev = sc->sc_dev;
   10617 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10618 	struct mii_data *mii = &sc->sc_mii;
   10619 
   10620 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10621 		device_xname(sc->sc_dev), __func__));
   10622 
   10623 	/* We have GMII. */
   10624 	sc->sc_flags |= WM_F_HAS_MII;
   10625 
   10626 	if (sc->sc_type == WM_T_80003)
   10627 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10628 	else
   10629 		sc->sc_tipg = TIPG_1000T_DFLT;
   10630 
   10631 	/*
   10632 	 * Let the chip set speed/duplex on its own based on
   10633 	 * signals from the PHY.
   10634 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10635 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10636 	 */
   10637 	sc->sc_ctrl |= CTRL_SLU;
   10638 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10639 
   10640 	/* Initialize our media structures and probe the GMII. */
   10641 	mii->mii_ifp = ifp;
   10642 
   10643 	mii->mii_statchg = wm_gmii_statchg;
   10644 
   10645 	/* Get PHY control from SMBus to PCIe */
   10646 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10647 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10648 	    || (sc->sc_type == WM_T_PCH_CNP))
   10649 		wm_init_phy_workarounds_pchlan(sc);
   10650 
   10651 	wm_gmii_reset(sc);
   10652 
   10653 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10654 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10655 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10656 
   10657 	/* Setup internal SGMII PHY for SFP */
   10658 	wm_sgmii_sfp_preconfig(sc);
   10659 
   10660 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10661 	    || (sc->sc_type == WM_T_82580)
   10662 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10663 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10664 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10665 			/* Attach only one port */
   10666 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10667 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10668 		} else {
   10669 			int i, id;
   10670 			uint32_t ctrl_ext;
   10671 
   10672 			id = wm_get_phy_id_82575(sc);
   10673 			if (id != -1) {
   10674 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10675 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10676 			}
   10677 			if ((id == -1)
   10678 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10679 				/* Power on the SGMII PHY if it is disabled */
   10680 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10681 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10682 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10683 				CSR_WRITE_FLUSH(sc);
   10684 				delay(300*1000); /* XXX too long */
   10685 
   10686 				/*
   10687 				 * From 1 to 8.
   10688 				 *
   10689 				 * I2C access fails with I2C register's ERROR
   10690 				 * bit set, so prevent error message while
   10691 				 * scanning.
   10692 				 */
   10693 				sc->phy.no_errprint = true;
   10694 				for (i = 1; i < 8; i++)
   10695 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10696 					    0xffffffff, i, MII_OFFSET_ANY,
   10697 					    MIIF_DOPAUSE);
   10698 				sc->phy.no_errprint = false;
   10699 
   10700 				/* Restore previous sfp cage power state */
   10701 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10702 			}
   10703 		}
   10704 	} else
   10705 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10706 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10707 
   10708 	/*
   10709 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10710 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10711 	 */
   10712 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10713 		|| (sc->sc_type == WM_T_PCH_SPT)
   10714 		|| (sc->sc_type == WM_T_PCH_CNP))
   10715 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10716 		wm_set_mdio_slow_mode_hv(sc);
   10717 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10718 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10719 	}
   10720 
   10721 	/*
   10722 	 * (For ICH8 variants)
   10723 	 * If PHY detection failed, use BM's r/w function and retry.
   10724 	 */
   10725 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10726 		/* if failed, retry with *_bm_* */
   10727 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10728 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10729 		    sc->sc_phytype);
   10730 		sc->sc_phytype = WMPHY_BM;
   10731 		mii->mii_readreg = wm_gmii_bm_readreg;
   10732 		mii->mii_writereg = wm_gmii_bm_writereg;
   10733 
   10734 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10735 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10736 	}
   10737 
   10738 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10739 		/* No PHY was found */
   10740 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10741 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10742 		sc->sc_phytype = WMPHY_NONE;
   10743 	} else {
   10744 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10745 
   10746 		/*
   10747 		 * PHY Found! Check PHY type again by the second call of
   10748 		 * wm_gmii_setup_phytype.
   10749 		 */
   10750 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10751 		    child->mii_mpd_model);
   10752 
   10753 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10754 	}
   10755 }
   10756 
   10757 /*
   10758  * wm_gmii_mediachange:	[ifmedia interface function]
   10759  *
   10760  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10761  */
   10762 static int
   10763 wm_gmii_mediachange(struct ifnet *ifp)
   10764 {
   10765 	struct wm_softc *sc = ifp->if_softc;
   10766 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10767 	uint32_t reg;
   10768 	int rc;
   10769 
   10770 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10771 		device_xname(sc->sc_dev), __func__));
   10772 	if ((ifp->if_flags & IFF_UP) == 0)
   10773 		return 0;
   10774 
   10775 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10776 	if ((sc->sc_type == WM_T_82580)
   10777 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10778 	    || (sc->sc_type == WM_T_I211)) {
   10779 		reg = CSR_READ(sc, WMREG_PHPM);
   10780 		reg &= ~PHPM_GO_LINK_D;
   10781 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10782 	}
   10783 
   10784 	/* Disable D0 LPLU. */
   10785 	wm_lplu_d0_disable(sc);
   10786 
   10787 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10788 	sc->sc_ctrl |= CTRL_SLU;
   10789 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10790 	    || (sc->sc_type > WM_T_82543)) {
   10791 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10792 	} else {
   10793 		sc->sc_ctrl &= ~CTRL_ASDE;
   10794 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10795 		if (ife->ifm_media & IFM_FDX)
   10796 			sc->sc_ctrl |= CTRL_FD;
   10797 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10798 		case IFM_10_T:
   10799 			sc->sc_ctrl |= CTRL_SPEED_10;
   10800 			break;
   10801 		case IFM_100_TX:
   10802 			sc->sc_ctrl |= CTRL_SPEED_100;
   10803 			break;
   10804 		case IFM_1000_T:
   10805 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10806 			break;
   10807 		case IFM_NONE:
   10808 			/* There is no specific setting for IFM_NONE */
   10809 			break;
   10810 		default:
   10811 			panic("wm_gmii_mediachange: bad media 0x%x",
   10812 			    ife->ifm_media);
   10813 		}
   10814 	}
   10815 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10816 	CSR_WRITE_FLUSH(sc);
   10817 
   10818 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10819 		wm_serdes_mediachange(ifp);
   10820 
   10821 	if (sc->sc_type <= WM_T_82543)
   10822 		wm_gmii_reset(sc);
   10823 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10824 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
   10825 		/* Allow time for the SFP cage to power up the PHY */
   10826 		delay(300 * 1000);
   10827 		wm_gmii_reset(sc);
   10828 	}
   10829 
   10830 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10831 		return 0;
   10832 	return rc;
   10833 }
   10834 
   10835 /*
   10836  * wm_gmii_mediastatus:	[ifmedia interface function]
   10837  *
   10838  *	Get the current interface media status on a 1000BASE-T device.
   10839  */
   10840 static void
   10841 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10842 {
   10843 	struct wm_softc *sc = ifp->if_softc;
   10844 
   10845 	ether_mediastatus(ifp, ifmr);
   10846 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10847 	    | sc->sc_flowflags;
   10848 }
   10849 
   10850 #define	MDI_IO		CTRL_SWDPIN(2)
   10851 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10852 #define	MDI_CLK		CTRL_SWDPIN(3)
   10853 
   10854 static void
   10855 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10856 {
   10857 	uint32_t i, v;
   10858 
   10859 	v = CSR_READ(sc, WMREG_CTRL);
   10860 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10861 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10862 
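         	/* Clock out each bit, MSB first: set MDI_IO, then pulse MDI_CLK. */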
   10863 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10864 		if (data & i)
   10865 			v |= MDI_IO;
   10866 		else
   10867 			v &= ~MDI_IO;
   10868 		CSR_WRITE(sc, WMREG_CTRL, v);
   10869 		CSR_WRITE_FLUSH(sc);
   10870 		delay(10);
   10871 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10872 		CSR_WRITE_FLUSH(sc);
   10873 		delay(10);
   10874 		CSR_WRITE(sc, WMREG_CTRL, v);
   10875 		CSR_WRITE_FLUSH(sc);
   10876 		delay(10);
   10877 	}
   10878 }
   10879 
   10880 static uint16_t
   10881 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10882 {
   10883 	uint32_t v, i;
   10884 	uint16_t data = 0;
   10885 
   10886 	v = CSR_READ(sc, WMREG_CTRL);
   10887 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10888 	v |= CTRL_SWDPIO(3);
   10889 
   10890 	CSR_WRITE(sc, WMREG_CTRL, v);
   10891 	CSR_WRITE_FLUSH(sc);
   10892 	delay(10);
   10893 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10894 	CSR_WRITE_FLUSH(sc);
   10895 	delay(10);
   10896 	CSR_WRITE(sc, WMREG_CTRL, v);
   10897 	CSR_WRITE_FLUSH(sc);
   10898 	delay(10);
   10899 
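         	/* Shift in 16 data bits, sampling MDI_IO while MDI_CLK is high. */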
   10900 	for (i = 0; i < 16; i++) {
   10901 		data <<= 1;
   10902 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10903 		CSR_WRITE_FLUSH(sc);
   10904 		delay(10);
   10905 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10906 			data |= 1;
   10907 		CSR_WRITE(sc, WMREG_CTRL, v);
   10908 		CSR_WRITE_FLUSH(sc);
   10909 		delay(10);
   10910 	}
   10911 
   10912 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10913 	CSR_WRITE_FLUSH(sc);
   10914 	delay(10);
   10915 	CSR_WRITE(sc, WMREG_CTRL, v);
   10916 	CSR_WRITE_FLUSH(sc);
   10917 	delay(10);
   10918 
   10919 	return data;
   10920 }
   10921 
   10922 #undef MDI_IO
   10923 #undef MDI_DIR
   10924 #undef MDI_CLK
   10925 
   10926 /*
   10927  * wm_gmii_i82543_readreg:	[mii interface function]
   10928  *
   10929  *	Read a PHY register on the GMII (i82543 version).
   10930  */
   10931 static int
   10932 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10933 {
   10934 	struct wm_softc *sc = device_private(dev);
   10935 
   10936 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10937 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10938 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10939 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10940 
   10941 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10942 		device_xname(dev), phy, reg, *val));
   10943 
   10944 	return 0;
   10945 }
   10946 
   10947 /*
   10948  * wm_gmii_i82543_writereg:	[mii interface function]
   10949  *
   10950  *	Write a PHY register on the GMII (i82543 version).
   10951  */
   10952 static int
   10953 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10954 {
   10955 	struct wm_softc *sc = device_private(dev);
   10956 
   10957 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10958 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10959 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10960 	    (MII_COMMAND_START << 30), 32);
   10961 
   10962 	return 0;
   10963 }
   10964 
   10965 /*
   10966  * wm_gmii_mdic_readreg:	[mii interface function]
   10967  *
   10968  *	Read a PHY register on the GMII.
   10969  */
   10970 static int
   10971 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10972 {
   10973 	struct wm_softc *sc = device_private(dev);
   10974 	uint32_t mdic = 0;
   10975 	int i;
   10976 
   10977 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10978 	    && (reg > MII_ADDRMASK)) {
   10979 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10980 		    __func__, sc->sc_phytype, reg);
   10981 		reg &= MII_ADDRMASK;
   10982 	}
   10983 
   10984 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10985 	    MDIC_REGADD(reg));
   10986 
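         	/* Poll until the MDI transaction completes. */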
   10987 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10988 		delay(50);
   10989 		mdic = CSR_READ(sc, WMREG_MDIC);
   10990 		if (mdic & MDIC_READY)
   10991 			break;
   10992 	}
   10993 
   10994 	if ((mdic & MDIC_READY) == 0) {
   10995 		DPRINTF(sc, WM_DEBUG_GMII,
   10996 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10997 			device_xname(dev), phy, reg));
   10998 		return ETIMEDOUT;
   10999 	} else if (mdic & MDIC_E) {
   11000 		/* This is normal if no PHY is present. */
   11001 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   11002 			device_xname(sc->sc_dev), phy, reg));
   11003 		return -1;
   11004 	} else
   11005 		*val = MDIC_DATA(mdic);
   11006 
   11007 	/*
   11008 	 * Allow some time after each MDIC transaction to avoid
   11009 	 * reading duplicate data in the next MDIC transaction.
   11010 	 */
   11011 	if (sc->sc_type == WM_T_PCH2)
   11012 		delay(100);
   11013 
   11014 	return 0;
   11015 }
   11016 
   11017 /*
   11018  * wm_gmii_mdic_writereg:	[mii interface function]
   11019  *
   11020  *	Write a PHY register on the GMII.
   11021  */
   11022 static int
   11023 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11024 {
   11025 	struct wm_softc *sc = device_private(dev);
   11026 	uint32_t mdic = 0;
   11027 	int i;
   11028 
   11029 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11030 	    && (reg > MII_ADDRMASK)) {
   11031 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11032 		    __func__, sc->sc_phytype, reg);
   11033 		reg &= MII_ADDRMASK;
   11034 	}
   11035 
   11036 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11037 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11038 
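         	/* Poll until the MDI transaction completes. */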
   11039 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11040 		delay(50);
   11041 		mdic = CSR_READ(sc, WMREG_MDIC);
   11042 		if (mdic & MDIC_READY)
   11043 			break;
   11044 	}
   11045 
   11046 	if ((mdic & MDIC_READY) == 0) {
   11047 		DPRINTF(sc, WM_DEBUG_GMII,
   11048 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11049 			device_xname(dev), phy, reg));
   11050 		return ETIMEDOUT;
   11051 	} else if (mdic & MDIC_E) {
   11052 		DPRINTF(sc, WM_DEBUG_GMII,
   11053 		    ("%s: MDIC write error: phy %d reg %d\n",
   11054 			device_xname(dev), phy, reg));
   11055 		return -1;
   11056 	}
   11057 
   11058 	/*
   11059 	 * Allow some time after each MDIC transaction to avoid
   11060 	 * reading duplicate data in the next MDIC transaction.
   11061 	 */
   11062 	if (sc->sc_type == WM_T_PCH2)
   11063 		delay(100);
   11064 
   11065 	return 0;
   11066 }
   11067 
   11068 /*
   11069  * wm_gmii_i82544_readreg:	[mii interface function]
   11070  *
   11071  *	Read a PHY register on the GMII.
   11072  */
   11073 static int
   11074 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11075 {
   11076 	struct wm_softc *sc = device_private(dev);
   11077 	int rv;
   11078 
   11079 	if (sc->phy.acquire(sc)) {
   11080 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11081 		return -1;
   11082 	}
   11083 
   11084 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11085 
   11086 	sc->phy.release(sc);
   11087 
   11088 	return rv;
   11089 }
   11090 
   11091 static int
   11092 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11093 {
   11094 	struct wm_softc *sc = device_private(dev);
   11095 	int rv;
   11096 
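         	/*
         	 * For IGP PHYs, registers above the multi-page boundary
         	 * need a page select first.
         	 */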
   11097 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11098 		switch (sc->sc_phytype) {
   11099 		case WMPHY_IGP:
   11100 		case WMPHY_IGP_2:
   11101 		case WMPHY_IGP_3:
   11102 			rv = wm_gmii_mdic_writereg(dev, phy,
   11103 			    IGPHY_PAGE_SELECT, reg);
   11104 			if (rv != 0)
   11105 				return rv;
   11106 			break;
   11107 		default:
   11108 #ifdef WM_DEBUG
   11109 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11110 			    __func__, sc->sc_phytype, reg);
   11111 #endif
   11112 			break;
   11113 		}
   11114 	}
   11115 
   11116 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11117 }
   11118 
   11119 /*
   11120  * wm_gmii_i82544_writereg:	[mii interface function]
   11121  *
   11122  *	Write a PHY register on the GMII.
   11123  */
   11124 static int
   11125 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11126 {
   11127 	struct wm_softc *sc = device_private(dev);
   11128 	int rv;
   11129 
   11130 	if (sc->phy.acquire(sc)) {
   11131 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11132 		return -1;
   11133 	}
   11134 
   11135 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11136 	sc->phy.release(sc);
   11137 
   11138 	return rv;
   11139 }
   11140 
   11141 static int
   11142 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11143 {
   11144 	struct wm_softc *sc = device_private(dev);
   11145 	int rv;
   11146 
   11147 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11148 		switch (sc->sc_phytype) {
   11149 		case WMPHY_IGP:
   11150 		case WMPHY_IGP_2:
   11151 		case WMPHY_IGP_3:
   11152 			rv = wm_gmii_mdic_writereg(dev, phy,
   11153 			    IGPHY_PAGE_SELECT, reg);
   11154 			if (rv != 0)
   11155 				return rv;
   11156 			break;
   11157 		default:
   11158 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11160 			    __func__, sc->sc_phytype, reg);
   11161 #endif
   11162 			break;
   11163 		}
   11164 	}
   11165 
   11166 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11167 }
   11168 
   11169 /*
   11170  * wm_gmii_i80003_readreg:	[mii interface function]
   11171  *
   11172  *	Read a PHY register on the kumeran
   11173  * This could be handled by the PHY layer if we didn't have to lock the
   11174  * resource ...
   11175  */
   11176 static int
   11177 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11178 {
   11179 	struct wm_softc *sc = device_private(dev);
   11180 	int page_select;
   11181 	uint16_t temp, temp2;
   11182 	int rv = 0;
   11183 
   11184 	if (phy != 1) /* Only one PHY on kumeran bus */
   11185 		return -1;
   11186 
   11187 	if (sc->phy.acquire(sc)) {
   11188 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11189 		return -1;
   11190 	}
   11191 
   11192 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11193 		page_select = GG82563_PHY_PAGE_SELECT;
   11194 	else {
   11195 		/*
   11196 		 * Use Alternative Page Select register to access registers
   11197 		 * 30 and 31.
   11198 		 */
   11199 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11200 	}
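	/* The upper bits of 'reg' carry the GG82563 page number. */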
   11201 	temp = reg >> GG82563_PAGE_SHIFT;
   11202 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11203 		goto out;
   11204 
   11205 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11206 		/*
		 * Wait an additional 200us to work around a bug in the
		 * ready bit of the MDIC register.
   11209 		 */
   11210 		delay(200);
   11211 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11212 		if ((rv != 0) || (temp2 != temp)) {
			device_printf(dev, "%s: page select failed\n",
			    __func__);
   11214 			rv = -1;
   11215 			goto out;
   11216 		}
   11217 		delay(200);
   11218 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11219 		delay(200);
   11220 	} else
   11221 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11222 
   11223 out:
   11224 	sc->phy.release(sc);
   11225 	return rv;
   11226 }
   11227 
   11228 /*
   11229  * wm_gmii_i80003_writereg:	[mii interface function]
   11230  *
   11231  *	Write a PHY register on the kumeran.
   11232  * This could be handled by the PHY layer if we didn't have to lock the
   11233  * resource ...
   11234  */
   11235 static int
   11236 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11237 {
   11238 	struct wm_softc *sc = device_private(dev);
   11239 	int page_select, rv;
   11240 	uint16_t temp, temp2;
   11241 
   11242 	if (phy != 1) /* Only one PHY on kumeran bus */
   11243 		return -1;
   11244 
   11245 	if (sc->phy.acquire(sc)) {
   11246 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11247 		return -1;
   11248 	}
   11249 
   11250 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11251 		page_select = GG82563_PHY_PAGE_SELECT;
   11252 	else {
   11253 		/*
   11254 		 * Use Alternative Page Select register to access registers
   11255 		 * 30 and 31.
   11256 		 */
   11257 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11258 	}
   11259 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11260 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11261 		goto out;
   11262 
   11263 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11264 		/*
		 * Wait an additional 200us to work around a bug in the
		 * ready bit of the MDIC register.
   11267 		 */
   11268 		delay(200);
   11269 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11270 		if ((rv != 0) || (temp2 != temp)) {
			device_printf(dev, "%s: page select failed\n",
			    __func__);
   11272 			rv = -1;
   11273 			goto out;
   11274 		}
   11275 		delay(200);
   11276 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11277 		delay(200);
   11278 	} else
   11279 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11280 
   11281 out:
   11282 	sc->phy.release(sc);
   11283 	return rv;
   11284 }
   11285 
   11286 /*
   11287  * wm_gmii_bm_readreg:	[mii interface function]
   11288  *
 *	Read a PHY register on the BM PHY.
   11290  * This could be handled by the PHY layer if we didn't have to lock the
   11291  * resource ...
   11292  */
   11293 static int
   11294 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11295 {
   11296 	struct wm_softc *sc = device_private(dev);
   11297 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11298 	int rv;
   11299 
   11300 	if (sc->phy.acquire(sc)) {
   11301 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11302 		return -1;
   11303 	}
   11304 
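	/*
	 * Except on 82574/82583, registers on pages >= 768, page 0
	 * register 25 and register 31 are only reachable through PHY
	 * address 1.
	 */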
   11305 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11306 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11307 		    || (reg == 31)) ? 1 : phy;
   11308 	/* Page 800 works differently than the rest so it has its own func */
   11309 	if (page == BM_WUC_PAGE) {
   11310 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11311 		goto release;
   11312 	}
   11313 
   11314 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11315 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11316 		    && (sc->sc_type != WM_T_82583))
   11317 			rv = wm_gmii_mdic_writereg(dev, phy,
   11318 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11319 		else
   11320 			rv = wm_gmii_mdic_writereg(dev, phy,
   11321 			    BME1000_PHY_PAGE_SELECT, page);
   11322 		if (rv != 0)
   11323 			goto release;
   11324 	}
   11325 
   11326 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11327 
   11328 release:
   11329 	sc->phy.release(sc);
   11330 	return rv;
   11331 }
   11332 
   11333 /*
   11334  * wm_gmii_bm_writereg:	[mii interface function]
   11335  *
 *	Write a PHY register on the BM PHY.
   11337  * This could be handled by the PHY layer if we didn't have to lock the
   11338  * resource ...
   11339  */
   11340 static int
   11341 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11342 {
   11343 	struct wm_softc *sc = device_private(dev);
   11344 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11345 	int rv;
   11346 
   11347 	if (sc->phy.acquire(sc)) {
   11348 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11349 		return -1;
   11350 	}
   11351 
   11352 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11353 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11354 		    || (reg == 31)) ? 1 : phy;
   11355 	/* Page 800 works differently than the rest so it has its own func */
   11356 	if (page == BM_WUC_PAGE) {
   11357 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11358 		goto release;
   11359 	}
   11360 
   11361 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11362 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11363 		    && (sc->sc_type != WM_T_82583))
   11364 			rv = wm_gmii_mdic_writereg(dev, phy,
   11365 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11366 		else
   11367 			rv = wm_gmii_mdic_writereg(dev, phy,
   11368 			    BME1000_PHY_PAGE_SELECT, page);
   11369 		if (rv != 0)
   11370 			goto release;
   11371 	}
   11372 
   11373 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11374 
   11375 release:
   11376 	sc->phy.release(sc);
   11377 	return rv;
   11378 }
   11379 
   11380 /*
   11381  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11382  *  @dev: pointer to the HW structure
   11383  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11384  *
   11385  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11386  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11387  */
   11388 static int
   11389 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11390 {
   11391 #ifdef WM_DEBUG
   11392 	struct wm_softc *sc = device_private(dev);
   11393 #endif
   11394 	uint16_t temp;
   11395 	int rv;
   11396 
   11397 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11398 		device_xname(dev), __func__));
   11399 
   11400 	if (!phy_regp)
   11401 		return -1;
   11402 
   11403 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11404 
   11405 	/* Select Port Control Registers page */
   11406 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11407 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11408 	if (rv != 0)
   11409 		return rv;
   11410 
   11411 	/* Read WUCE and save it */
   11412 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11413 	if (rv != 0)
   11414 		return rv;
   11415 
   11416 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11417 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11418 	 */
   11419 	temp = *phy_regp;
   11420 	temp |= BM_WUC_ENABLE_BIT;
   11421 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11422 
   11423 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11424 		return rv;
   11425 
   11426 	/* Select Host Wakeup Registers page - caller now able to write
   11427 	 * registers on the Wakeup registers page
   11428 	 */
   11429 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11430 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11431 }
   11432 
   11433 /*
   11434  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11435  *  @dev: pointer to the HW structure
   11436  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11437  *
   11438  *  Restore BM_WUC_ENABLE_REG to its original value.
   11439  *
   11440  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11441  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11442  *  caller.
   11443  */
   11444 static int
   11445 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11446 {
   11447 #ifdef WM_DEBUG
   11448 	struct wm_softc *sc = device_private(dev);
   11449 #endif
   11450 
   11451 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11452 		device_xname(dev), __func__));
   11453 
   11454 	if (!phy_regp)
   11455 		return -1;
   11456 
   11457 	/* Select Port Control Registers page */
   11458 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11459 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11460 
   11461 	/* Restore 769.17 to its original value */
   11462 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11463 
   11464 	return 0;
   11465 }
   11466 
   11467 /*
   11468  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11469  *  @sc: pointer to the HW structure
   11470  *  @offset: register offset to be read or written
   11471  *  @val: pointer to the data to read or write
   11472  *  @rd: determines if operation is read or write
   11473  *  @page_set: BM_WUC_PAGE already set and access enabled
   11474  *
   11475  *  Read the PHY register at offset and store the retrieved information in
   11476  *  data, or write data to PHY register at offset.  Note the procedure to
   11477  *  access the PHY wakeup registers is different than reading the other PHY
   11478  *  registers. It works as such:
   11479  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 for manageability)
   11481  *  3) Write the address using the address opcode (0x11)
   11482  *  4) Read or write the data using the data opcode (0x12)
   11483  *  5) Restore 769.17.2 to its original value
   11484  *
   11485  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11486  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11487  *
   11488  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11489  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11490  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
 *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
 */
   11492 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11494 	bool page_set)
   11495 {
   11496 	struct wm_softc *sc = device_private(dev);
   11497 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11498 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11499 	uint16_t wuce;
   11500 	int rv = 0;
   11501 
   11502 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11503 		device_xname(dev), __func__));
   11504 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11505 	if ((sc->sc_type == WM_T_PCH)
   11506 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11507 		device_printf(dev,
   11508 		    "Attempting to access page %d while gig enabled.\n", page);
   11509 	}
   11510 
   11511 	if (!page_set) {
   11512 		/* Enable access to PHY wakeup registers */
   11513 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11514 		if (rv != 0) {
   11515 			device_printf(dev,
   11516 			    "%s: Could not enable PHY wakeup reg access\n",
   11517 			    __func__);
   11518 			return rv;
   11519 		}
   11520 	}
   11521 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11522 		device_xname(sc->sc_dev), __func__, page, regnum));
   11523 
	/*
	 * 3) and 4) Write the register address, then read or write its
	 * data, per the procedure described in the comment above.
	 */
   11528 
   11529 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11530 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11531 	if (rv != 0)
   11532 		return rv;
   11533 
   11534 	if (rd) {
   11535 		/* Read the Wakeup register page value using opcode 0x12 */
   11536 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11537 	} else {
   11538 		/* Write the Wakeup register page value using opcode 0x12 */
   11539 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11540 	}
   11541 	if (rv != 0)
   11542 		return rv;
   11543 
   11544 	if (!page_set)
   11545 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11546 
   11547 	return rv;
   11548 }
   11549 
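/*
 * Example (sketch): a single wakeup-register read with the page not yet
 * set up by the caller ends up performing, in order,
 *
 *	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);		(steps 1-2)
 *	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
 *	wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
 *	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);	(step 5)
 */
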
   11550 /*
   11551  * wm_gmii_hv_readreg:	[mii interface function]
   11552  *
 *	Read a PHY register on the HV PHY (PCH and newer).
   11554  * This could be handled by the PHY layer if we didn't have to lock the
   11555  * resource ...
   11556  */
   11557 static int
   11558 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11559 {
   11560 	struct wm_softc *sc = device_private(dev);
   11561 	int rv;
   11562 
   11563 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11564 		device_xname(dev), __func__));
   11565 	if (sc->phy.acquire(sc)) {
   11566 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11567 		return -1;
   11568 	}
   11569 
   11570 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11571 	sc->phy.release(sc);
   11572 	return rv;
   11573 }
   11574 
   11575 static int
   11576 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11577 {
   11578 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11579 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11580 	int rv;
   11581 
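	/* Pages at or above HV_INTC_FC_PAGE_START (768) use PHY address 1. */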
   11582 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11583 
   11584 	/* Page 800 works differently than the rest so it has its own func */
   11585 	if (page == BM_WUC_PAGE)
   11586 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11587 
	/*
	 * Pages below 768 (other than page 0) work differently from the
	 * rest and are not handled here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
		return -1;
	}
   11596 
   11597 	/*
   11598 	 * XXX I21[789] documents say that the SMBus Address register is at
   11599 	 * PHY address 01, Page 0 (not 768), Register 26.
   11600 	 */
   11601 	if (page == HV_INTC_FC_PAGE_START)
   11602 		page = 0;
   11603 
   11604 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11605 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11606 		    page << BME1000_PAGE_SHIFT);
   11607 		if (rv != 0)
   11608 			return rv;
   11609 	}
   11610 
   11611 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11612 }
   11613 
   11614 /*
   11615  * wm_gmii_hv_writereg:	[mii interface function]
   11616  *
 *	Write a PHY register on the HV PHY (PCH and newer).
   11618  * This could be handled by the PHY layer if we didn't have to lock the
   11619  * resource ...
   11620  */
   11621 static int
   11622 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11623 {
   11624 	struct wm_softc *sc = device_private(dev);
   11625 	int rv;
   11626 
   11627 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11628 		device_xname(dev), __func__));
   11629 
   11630 	if (sc->phy.acquire(sc)) {
   11631 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11632 		return -1;
   11633 	}
   11634 
   11635 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11636 	sc->phy.release(sc);
   11637 
   11638 	return rv;
   11639 }
   11640 
   11641 static int
   11642 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11643 {
   11644 	struct wm_softc *sc = device_private(dev);
   11645 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11646 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11647 	int rv;
   11648 
   11649 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11650 
   11651 	/* Page 800 works differently than the rest so it has its own func */
   11652 	if (page == BM_WUC_PAGE)
   11653 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11654 		    false);
   11655 
	/*
	 * Pages below 768 (other than page 0) work differently from the
	 * rest and are not handled here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
		return -1;
	}
   11664 
	/*
	 * XXX I21[789] documents say that the SMBus Address register is at
	 * PHY address 01, Page 0 (not 768), Register 26.
	 */
	if (page == HV_INTC_FC_PAGE_START)
		page = 0;

	/*
	 * XXX Workaround MDIO accesses being disabled after entering IEEE
	 * Power Down (whenever bit 11 of the PHY control register is set)
	 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev >= 1)
		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
		    && ((val & (1 << 11)) != 0))
			device_printf(dev, "XXX need workaround\n");
	}

	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
		    page << BME1000_PAGE_SHIFT);
		if (rv != 0)
			return rv;
	}
   11696 
   11697 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11698 }
   11699 
   11700 /*
   11701  * wm_gmii_82580_readreg:	[mii interface function]
   11702  *
   11703  *	Read a PHY register on the 82580 and I350.
   11704  * This could be handled by the PHY layer if we didn't have to lock the
   11705  * resource ...
   11706  */
   11707 static int
   11708 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11709 {
   11710 	struct wm_softc *sc = device_private(dev);
   11711 	int rv;
   11712 
   11713 	if (sc->phy.acquire(sc) != 0) {
   11714 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11715 		return -1;
   11716 	}
   11717 
   11718 #ifdef DIAGNOSTIC
   11719 	if (reg > MII_ADDRMASK) {
   11720 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11721 		    __func__, sc->sc_phytype, reg);
   11722 		reg &= MII_ADDRMASK;
   11723 	}
   11724 #endif
   11725 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11726 
   11727 	sc->phy.release(sc);
   11728 	return rv;
   11729 }
   11730 
   11731 /*
   11732  * wm_gmii_82580_writereg:	[mii interface function]
   11733  *
   11734  *	Write a PHY register on the 82580 and I350.
   11735  * This could be handled by the PHY layer if we didn't have to lock the
   11736  * resource ...
   11737  */
   11738 static int
   11739 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11740 {
   11741 	struct wm_softc *sc = device_private(dev);
   11742 	int rv;
   11743 
   11744 	if (sc->phy.acquire(sc) != 0) {
   11745 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11746 		return -1;
   11747 	}
   11748 
   11749 #ifdef DIAGNOSTIC
   11750 	if (reg > MII_ADDRMASK) {
   11751 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11752 		    __func__, sc->sc_phytype, reg);
   11753 		reg &= MII_ADDRMASK;
   11754 	}
   11755 #endif
   11756 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11757 
   11758 	sc->phy.release(sc);
   11759 	return rv;
   11760 }
   11761 
   11762 /*
   11763  * wm_gmii_gs40g_readreg:	[mii interface function]
   11764  *
 *	Read a PHY register on the I210 and I211.
   11766  * This could be handled by the PHY layer if we didn't have to lock the
   11767  * resource ...
   11768  */
   11769 static int
   11770 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11771 {
   11772 	struct wm_softc *sc = device_private(dev);
   11773 	int page, offset;
   11774 	int rv;
   11775 
   11776 	/* Acquire semaphore */
   11777 	if (sc->phy.acquire(sc)) {
   11778 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11779 		return -1;
   11780 	}
   11781 
   11782 	/* Page select */
   11783 	page = reg >> GS40G_PAGE_SHIFT;
   11784 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11785 	if (rv != 0)
   11786 		goto release;
   11787 
   11788 	/* Read reg */
   11789 	offset = reg & GS40G_OFFSET_MASK;
   11790 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11791 
   11792 release:
   11793 	sc->phy.release(sc);
   11794 	return rv;
   11795 }
   11796 
   11797 /*
   11798  * wm_gmii_gs40g_writereg:	[mii interface function]
   11799  *
   11800  *	Write a PHY register on the I210 and I211.
   11801  * This could be handled by the PHY layer if we didn't have to lock the
   11802  * resource ...
   11803  */
   11804 static int
   11805 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11806 {
   11807 	struct wm_softc *sc = device_private(dev);
   11808 	uint16_t page;
   11809 	int offset, rv;
   11810 
   11811 	/* Acquire semaphore */
   11812 	if (sc->phy.acquire(sc)) {
   11813 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11814 		return -1;
   11815 	}
   11816 
   11817 	/* Page select */
   11818 	page = reg >> GS40G_PAGE_SHIFT;
   11819 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11820 	if (rv != 0)
   11821 		goto release;
   11822 
   11823 	/* Write reg */
   11824 	offset = reg & GS40G_OFFSET_MASK;
   11825 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11826 
   11827 release:
   11828 	/* Release semaphore */
   11829 	sc->phy.release(sc);
   11830 	return rv;
   11831 }
   11832 
   11833 /*
   11834  * wm_gmii_statchg:	[mii interface function]
   11835  *
   11836  *	Callback from MII layer when media changes.
   11837  */
   11838 static void
   11839 wm_gmii_statchg(struct ifnet *ifp)
   11840 {
   11841 	struct wm_softc *sc = ifp->if_softc;
   11842 	struct mii_data *mii = &sc->sc_mii;
   11843 
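	/*
	 * Clear the cached flow control and collision distance settings;
	 * they are recomputed from the negotiated media below.
	 */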
   11844 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11845 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11846 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11847 
   11848 	/* Get flow control negotiation result. */
   11849 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11850 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11851 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11852 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11853 	}
   11854 
   11855 	if (sc->sc_flowflags & IFM_FLOW) {
   11856 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11857 			sc->sc_ctrl |= CTRL_TFCE;
   11858 			sc->sc_fcrtl |= FCRTL_XONE;
   11859 		}
   11860 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11861 			sc->sc_ctrl |= CTRL_RFCE;
   11862 	}
   11863 
   11864 	if (mii->mii_media_active & IFM_FDX) {
   11865 		DPRINTF(sc, WM_DEBUG_LINK,
   11866 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11867 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11868 	} else {
   11869 		DPRINTF(sc, WM_DEBUG_LINK,
   11870 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11871 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11872 	}
   11873 
   11874 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11875 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11876 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11877 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11878 	if (sc->sc_type == WM_T_80003) {
   11879 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11880 		case IFM_1000_T:
   11881 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11882 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11884 			break;
   11885 		default:
   11886 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11887 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11889 			break;
   11890 		}
   11891 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11892 	}
   11893 }
   11894 
   11895 /* kumeran related (80003, ICH* and PCH*) */
   11896 
   11897 /*
   11898  * wm_kmrn_readreg:
   11899  *
   11900  *	Read a kumeran register
   11901  */
   11902 static int
   11903 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11904 {
   11905 	int rv;
   11906 
   11907 	if (sc->sc_type == WM_T_80003)
   11908 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11909 	else
   11910 		rv = sc->phy.acquire(sc);
   11911 	if (rv != 0) {
   11912 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11913 		    __func__);
   11914 		return rv;
   11915 	}
   11916 
   11917 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11918 
   11919 	if (sc->sc_type == WM_T_80003)
   11920 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11921 	else
   11922 		sc->phy.release(sc);
   11923 
   11924 	return rv;
   11925 }
   11926 
   11927 static int
   11928 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11929 {
   11930 
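	/*
	 * A Kumeran read is a two-step CSR access: write the register
	 * offset with the read-enable bit (KUMCTRLSTA_REN) set, give the
	 * transaction a moment to complete, then read the data back from
	 * the same CSR.
	 */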
   11931 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11932 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11933 	    KUMCTRLSTA_REN);
   11934 	CSR_WRITE_FLUSH(sc);
   11935 	delay(2);
   11936 
   11937 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11938 
   11939 	return 0;
   11940 }
   11941 
   11942 /*
   11943  * wm_kmrn_writereg:
   11944  *
   11945  *	Write a kumeran register
   11946  */
   11947 static int
   11948 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11949 {
   11950 	int rv;
   11951 
   11952 	if (sc->sc_type == WM_T_80003)
   11953 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11954 	else
   11955 		rv = sc->phy.acquire(sc);
   11956 	if (rv != 0) {
   11957 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11958 		    __func__);
   11959 		return rv;
   11960 	}
   11961 
   11962 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11963 
   11964 	if (sc->sc_type == WM_T_80003)
   11965 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11966 	else
   11967 		sc->phy.release(sc);
   11968 
   11969 	return rv;
   11970 }
   11971 
   11972 static int
   11973 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11974 {
   11975 
   11976 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11977 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11978 
   11979 	return 0;
   11980 }
   11981 
   11982 /*
   11983  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11984  * This access method is different from IEEE MMD.
   11985  */
   11986 static int
   11987 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11988 {
   11989 	struct wm_softc *sc = device_private(dev);
   11990 	int rv;
   11991 
   11992 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11993 	if (rv != 0)
   11994 		return rv;
   11995 
   11996 	if (rd)
   11997 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11998 	else
   11999 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12000 	return rv;
   12001 }
   12002 
   12003 static int
   12004 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12005 {
   12006 
   12007 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12008 }
   12009 
   12010 static int
   12011 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12012 {
   12013 
   12014 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12015 }
   12016 
   12017 /* SGMII related */
   12018 
   12019 /*
   12020  * wm_sgmii_uses_mdio
   12021  *
   12022  * Check whether the transaction is to the internal PHY or the external
   12023  * MDIO interface. Return true if it's MDIO.
   12024  */
   12025 static bool
   12026 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12027 {
   12028 	uint32_t reg;
   12029 	bool ismdio = false;
   12030 
   12031 	switch (sc->sc_type) {
   12032 	case WM_T_82575:
   12033 	case WM_T_82576:
   12034 		reg = CSR_READ(sc, WMREG_MDIC);
   12035 		ismdio = ((reg & MDIC_DEST) != 0);
   12036 		break;
   12037 	case WM_T_82580:
   12038 	case WM_T_I350:
   12039 	case WM_T_I354:
   12040 	case WM_T_I210:
   12041 	case WM_T_I211:
   12042 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12043 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12044 		break;
   12045 	default:
   12046 		break;
   12047 	}
   12048 
   12049 	return ismdio;
   12050 }
   12051 
   12052 /* Setup internal SGMII PHY for SFP */
   12053 static void
   12054 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12055 {
   12056 	uint16_t id1, id2, phyreg;
   12057 	int i, rv;
   12058 
   12059 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12060 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12061 		return;
   12062 
   12063 	for (i = 0; i < MII_NPHY; i++) {
   12064 		sc->phy.no_errprint = true;
   12065 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12066 		if (rv != 0)
   12067 			continue;
   12068 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12069 		if (rv != 0)
   12070 			continue;
   12071 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12072 			continue;
   12073 		sc->phy.no_errprint = false;
   12074 
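		/*
		 * Put the Marvell PHY into SGMII (without clock) to copper
		 * mode, clearing autonegotiation bypass.
		 */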
   12075 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12076 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12077 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12078 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12079 		break;
	}
}
   12083 
   12084 /*
   12085  * wm_sgmii_readreg:	[mii interface function]
   12086  *
   12087  *	Read a PHY register on the SGMII
   12088  * This could be handled by the PHY layer if we didn't have to lock the
   12089  * resource ...
   12090  */
   12091 static int
   12092 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12093 {
   12094 	struct wm_softc *sc = device_private(dev);
   12095 	int rv;
   12096 
   12097 	if (sc->phy.acquire(sc)) {
   12098 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12099 		return -1;
   12100 	}
   12101 
   12102 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12103 
   12104 	sc->phy.release(sc);
   12105 	return rv;
   12106 }
   12107 
   12108 static int
   12109 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12110 {
   12111 	struct wm_softc *sc = device_private(dev);
   12112 	uint32_t i2ccmd;
   12113 	int i, rv = 0;
   12114 
   12115 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12116 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12117 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12118 
   12119 	/* Poll the ready bit */
   12120 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12121 		delay(50);
   12122 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12123 		if (i2ccmd & I2CCMD_READY)
   12124 			break;
   12125 	}
   12126 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12127 		device_printf(dev, "I2CCMD Read did not complete\n");
   12128 		rv = ETIMEDOUT;
   12129 	}
   12130 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12131 		if (!sc->phy.no_errprint)
   12132 			device_printf(dev, "I2CCMD Error bit set\n");
   12133 		rv = EIO;
   12134 	}
   12135 
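	/*
	 * Swap the data bytes back from I2C (wire) order, mirroring the
	 * swap done on the write side.
	 */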
   12136 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12137 
   12138 	return rv;
   12139 }
   12140 
   12141 /*
   12142  * wm_sgmii_writereg:	[mii interface function]
   12143  *
   12144  *	Write a PHY register on the SGMII.
   12145  * This could be handled by the PHY layer if we didn't have to lock the
   12146  * resource ...
   12147  */
   12148 static int
   12149 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12150 {
   12151 	struct wm_softc *sc = device_private(dev);
   12152 	int rv;
   12153 
   12154 	if (sc->phy.acquire(sc) != 0) {
   12155 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12156 		return -1;
   12157 	}
   12158 
   12159 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12160 
   12161 	sc->phy.release(sc);
   12162 
   12163 	return rv;
   12164 }
   12165 
   12166 static int
   12167 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12168 {
   12169 	struct wm_softc *sc = device_private(dev);
   12170 	uint32_t i2ccmd;
   12171 	uint16_t swapdata;
   12172 	int rv = 0;
   12173 	int i;
   12174 
   12175 	/* Swap the data bytes for the I2C interface */
   12176 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12177 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12178 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12179 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12180 
   12181 	/* Poll the ready bit */
   12182 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12183 		delay(50);
   12184 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12185 		if (i2ccmd & I2CCMD_READY)
   12186 			break;
   12187 	}
   12188 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12189 		device_printf(dev, "I2CCMD Write did not complete\n");
   12190 		rv = ETIMEDOUT;
   12191 	}
   12192 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12193 		device_printf(dev, "I2CCMD Error bit set\n");
   12194 		rv = EIO;
   12195 	}
   12196 
   12197 	return rv;
   12198 }
   12199 
   12200 /* TBI related */
   12201 
   12202 static bool
   12203 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12204 {
   12205 	bool sig;
   12206 
   12207 	sig = ctrl & CTRL_SWDPIN(1);
   12208 
   12209 	/*
   12210 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12211 	 * detect a signal, 1 if they don't.
   12212 	 */
   12213 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12214 		sig = !sig;
   12215 
   12216 	return sig;
   12217 }
   12218 
   12219 /*
   12220  * wm_tbi_mediainit:
   12221  *
   12222  *	Initialize media for use on 1000BASE-X devices.
   12223  */
   12224 static void
   12225 wm_tbi_mediainit(struct wm_softc *sc)
   12226 {
   12227 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12228 	const char *sep = "";
   12229 
   12230 	if (sc->sc_type < WM_T_82543)
   12231 		sc->sc_tipg = TIPG_WM_DFLT;
   12232 	else
   12233 		sc->sc_tipg = TIPG_LG_DFLT;
   12234 
   12235 	sc->sc_tbi_serdes_anegticks = 5;
   12236 
   12237 	/* Initialize our media structures */
   12238 	sc->sc_mii.mii_ifp = ifp;
   12239 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12240 
   12241 	ifp->if_baudrate = IF_Gbps(1);
   12242 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12243 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12244 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12245 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12246 		    sc->sc_core_lock);
   12247 	} else {
   12248 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12249 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12250 	}
   12251 
   12252 	/*
   12253 	 * SWD Pins:
   12254 	 *
   12255 	 *	0 = Link LED (output)
   12256 	 *	1 = Loss Of Signal (input)
   12257 	 */
   12258 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12259 
   12260 	/* XXX Perhaps this is only for TBI */
   12261 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12262 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12263 
   12264 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12265 		sc->sc_ctrl &= ~CTRL_LRST;
   12266 
   12267 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12268 
   12269 #define	ADD(ss, mm, dd)							\
   12270 do {									\
   12271 	aprint_normal("%s%s", sep, ss);					\
   12272 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12273 	sep = ", ";							\
   12274 } while (/*CONSTCOND*/0)
   12275 
   12276 	aprint_normal_dev(sc->sc_dev, "");
   12277 
   12278 	if (sc->sc_type == WM_T_I354) {
   12279 		uint32_t status;
   12280 
   12281 		status = CSR_READ(sc, WMREG_STATUS);
   12282 		if (((status & STATUS_2P5_SKU) != 0)
   12283 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   12287 	} else if (sc->sc_type == WM_T_82545) {
   12288 		/* Only 82545 is LX (XXX except SFP) */
   12289 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12290 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12291 	} else if (sc->sc_sfptype != 0) {
   12292 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12293 		switch (sc->sc_sfptype) {
   12294 		default:
   12295 		case SFF_SFP_ETH_FLAGS_1000SX:
   12296 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12297 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12298 			break;
   12299 		case SFF_SFP_ETH_FLAGS_1000LX:
   12300 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12301 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12302 			break;
   12303 		case SFF_SFP_ETH_FLAGS_1000CX:
   12304 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12305 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12306 			break;
   12307 		case SFF_SFP_ETH_FLAGS_1000T:
   12308 			ADD("1000baseT", IFM_1000_T, 0);
   12309 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12310 			break;
   12311 		case SFF_SFP_ETH_FLAGS_100FX:
   12312 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12313 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12314 			break;
   12315 		}
   12316 	} else {
   12317 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12318 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12319 	}
   12320 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12321 	aprint_normal("\n");
   12322 
   12323 #undef ADD
   12324 
   12325 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12326 }
   12327 
   12328 /*
   12329  * wm_tbi_mediachange:	[ifmedia interface function]
   12330  *
   12331  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12332  */
   12333 static int
   12334 wm_tbi_mediachange(struct ifnet *ifp)
   12335 {
   12336 	struct wm_softc *sc = ifp->if_softc;
   12337 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12338 	uint32_t status, ctrl;
   12339 	bool signal;
   12340 	int i;
   12341 
   12342 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12343 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12344 		/* XXX need some work for >= 82571 and < 82575 */
   12345 		if (sc->sc_type < WM_T_82575)
   12346 			return 0;
   12347 	}
   12348 
   12349 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12350 	    || (sc->sc_type >= WM_T_82575))
   12351 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12352 
   12353 	sc->sc_ctrl &= ~CTRL_LRST;
   12354 	sc->sc_txcw = TXCW_ANE;
   12355 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12356 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12357 	else if (ife->ifm_media & IFM_FDX)
   12358 		sc->sc_txcw |= TXCW_FD;
   12359 	else
   12360 		sc->sc_txcw |= TXCW_HD;
   12361 
   12362 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12363 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12364 
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12366 		device_xname(sc->sc_dev), sc->sc_txcw));
   12367 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12368 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12369 	CSR_WRITE_FLUSH(sc);
   12370 	delay(1000);
   12371 
   12372 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12373 	signal = wm_tbi_havesignal(sc, ctrl);
   12374 
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n",
		device_xname(sc->sc_dev), signal));
   12377 
   12378 	if (signal) {
   12379 		/* Have signal; wait for the link to come up. */
   12380 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12381 			delay(10000);
   12382 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12383 				break;
   12384 		}
   12385 
		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));
   12388 
   12389 		status = CSR_READ(sc, WMREG_STATUS);
   12390 		DPRINTF(sc, WM_DEBUG_LINK,
   12391 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12392 			device_xname(sc->sc_dev), status, STATUS_LU));
   12393 		if (status & STATUS_LU) {
   12394 			/* Link is up. */
   12395 			DPRINTF(sc, WM_DEBUG_LINK,
   12396 			    ("%s: LINK: set media -> link up %s\n",
   12397 				device_xname(sc->sc_dev),
   12398 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12399 
   12400 			/*
			 * NOTE: The hardware updates the TFCE and RFCE bits
			 * in CTRL automatically, so refresh our cached
			 * sc->sc_ctrl from the register.
   12403 			 */
   12404 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12405 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12406 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12407 			if (status & STATUS_FD)
   12408 				sc->sc_tctl |=
   12409 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12410 			else
   12411 				sc->sc_tctl |=
   12412 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12413 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12414 				sc->sc_fcrtl |= FCRTL_XONE;
   12415 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12416 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12417 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12418 			sc->sc_tbi_linkup = 1;
   12419 		} else {
   12420 			if (i == WM_LINKUP_TIMEOUT)
   12421 				wm_check_for_link(sc);
   12422 			/* Link is down. */
   12423 			DPRINTF(sc, WM_DEBUG_LINK,
   12424 			    ("%s: LINK: set media -> link down\n",
   12425 				device_xname(sc->sc_dev)));
   12426 			sc->sc_tbi_linkup = 0;
   12427 		}
   12428 	} else {
   12429 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12430 			device_xname(sc->sc_dev)));
   12431 		sc->sc_tbi_linkup = 0;
   12432 	}
   12433 
   12434 	wm_tbi_serdes_set_linkled(sc);
   12435 
   12436 	return 0;
   12437 }
   12438 
   12439 /*
   12440  * wm_tbi_mediastatus:	[ifmedia interface function]
   12441  *
   12442  *	Get the current interface media status on a 1000BASE-X device.
   12443  */
   12444 static void
   12445 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12446 {
   12447 	struct wm_softc *sc = ifp->if_softc;
   12448 	uint32_t ctrl, status;
   12449 
   12450 	ifmr->ifm_status = IFM_AVALID;
   12451 	ifmr->ifm_active = IFM_ETHER;
   12452 
   12453 	status = CSR_READ(sc, WMREG_STATUS);
   12454 	if ((status & STATUS_LU) == 0) {
   12455 		ifmr->ifm_active |= IFM_NONE;
   12456 		return;
   12457 	}
   12458 
   12459 	ifmr->ifm_status |= IFM_ACTIVE;
   12460 	/* Only 82545 is LX */
   12461 	if (sc->sc_type == WM_T_82545)
   12462 		ifmr->ifm_active |= IFM_1000_LX;
   12463 	else
   12464 		ifmr->ifm_active |= IFM_1000_SX;
   12465 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12466 		ifmr->ifm_active |= IFM_FDX;
   12467 	else
   12468 		ifmr->ifm_active |= IFM_HDX;
   12469 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12470 	if (ctrl & CTRL_RFCE)
   12471 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12472 	if (ctrl & CTRL_TFCE)
   12473 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12474 }
   12475 
   12476 /* XXX TBI only */
   12477 static int
   12478 wm_check_for_link(struct wm_softc *sc)
   12479 {
   12480 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12481 	uint32_t rxcw;
   12482 	uint32_t ctrl;
   12483 	uint32_t status;
   12484 	bool signal;
   12485 
   12486 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12487 		device_xname(sc->sc_dev), __func__));
   12488 
   12489 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12490 		/* XXX need some work for >= 82571 */
   12491 		if (sc->sc_type >= WM_T_82571) {
   12492 			sc->sc_tbi_linkup = 1;
   12493 			return 0;
   12494 		}
   12495 	}
   12496 
   12497 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12498 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12499 	status = CSR_READ(sc, WMREG_STATUS);
   12500 	signal = wm_tbi_havesignal(sc, ctrl);
   12501 
   12502 	DPRINTF(sc, WM_DEBUG_LINK,
   12503 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12504 		device_xname(sc->sc_dev), __func__, signal,
   12505 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12506 
   12507 	/*
   12508 	 * SWDPIN   LU RXCW
   12509 	 *	0    0	  0
   12510 	 *	0    0	  1	(should not happen)
   12511 	 *	0    1	  0	(should not happen)
   12512 	 *	0    1	  1	(should not happen)
   12513 	 *	1    0	  0	Disable autonego and force linkup
   12514 	 *	1    0	  1	got /C/ but not linkup yet
   12515 	 *	1    1	  0	(linkup)
   12516 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12517 	 *
   12518 	 */
   12519 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12520 		DPRINTF(sc, WM_DEBUG_LINK,
   12521 		    ("%s: %s: force linkup and fullduplex\n",
   12522 			device_xname(sc->sc_dev), __func__));
   12523 		sc->sc_tbi_linkup = 0;
   12524 		/* Disable auto-negotiation in the TXCW register */
   12525 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12526 
   12527 		/*
   12528 		 * Force link-up and also force full-duplex.
   12529 		 *
		 * NOTE: The hardware updated the TFCE and RFCE bits in CTRL
		 * automatically, so refresh our cached sc->sc_ctrl from it.
   12532 		 */
   12533 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12534 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12535 	} else if (((status & STATUS_LU) != 0)
   12536 	    && ((rxcw & RXCW_C) != 0)
   12537 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12538 		sc->sc_tbi_linkup = 1;
   12539 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12540 			device_xname(sc->sc_dev),
   12541 			__func__));
   12542 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12543 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12544 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12546 			device_xname(sc->sc_dev), __func__));
   12547 	} else {
   12548 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12549 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12550 			status));
   12551 	}
   12552 
   12553 	return 0;
   12554 }
   12555 
   12556 /*
   12557  * wm_tbi_tick:
   12558  *
   12559  *	Check the link on TBI devices.
   12560  *	This function acts as mii_tick().
   12561  */
   12562 static void
   12563 wm_tbi_tick(struct wm_softc *sc)
   12564 {
   12565 	struct mii_data *mii = &sc->sc_mii;
   12566 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12567 	uint32_t status;
   12568 
   12569 	KASSERT(WM_CORE_LOCKED(sc));
   12570 
   12571 	status = CSR_READ(sc, WMREG_STATUS);
   12572 
   12573 	/* XXX is this needed? */
   12574 	(void)CSR_READ(sc, WMREG_RXCW);
   12575 	(void)CSR_READ(sc, WMREG_CTRL);
   12576 
   12577 	/* set link status */
   12578 	if ((status & STATUS_LU) == 0) {
   12579 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12580 			device_xname(sc->sc_dev)));
   12581 		sc->sc_tbi_linkup = 0;
   12582 	} else if (sc->sc_tbi_linkup == 0) {
   12583 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12584 			device_xname(sc->sc_dev),
   12585 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12586 		sc->sc_tbi_linkup = 1;
   12587 		sc->sc_tbi_serdes_ticks = 0;
   12588 	}
   12589 
   12590 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12591 		goto setled;
   12592 
   12593 	if ((status & STATUS_LU) == 0) {
   12594 		sc->sc_tbi_linkup = 0;
   12595 		/* If the timer expired, retry autonegotiation */
   12596 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12597 		    && (++sc->sc_tbi_serdes_ticks
   12598 			>= sc->sc_tbi_serdes_anegticks)) {
   12599 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12600 				device_xname(sc->sc_dev), __func__));
   12601 			sc->sc_tbi_serdes_ticks = 0;
   12602 			/*
   12603 			 * Reset the link, and let autonegotiation do
   12604 			 * its thing
   12605 			 */
   12606 			sc->sc_ctrl |= CTRL_LRST;
   12607 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12608 			CSR_WRITE_FLUSH(sc);
   12609 			delay(1000);
   12610 			sc->sc_ctrl &= ~CTRL_LRST;
   12611 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12612 			CSR_WRITE_FLUSH(sc);
   12613 			delay(1000);
   12614 			CSR_WRITE(sc, WMREG_TXCW,
   12615 			    sc->sc_txcw & ~TXCW_ANE);
   12616 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12617 		}
   12618 	}
   12619 
   12620 setled:
   12621 	wm_tbi_serdes_set_linkled(sc);
   12622 }
   12623 
   12624 /* SERDES related */
   12625 static void
   12626 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12627 {
   12628 	uint32_t reg;
   12629 
   12630 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12631 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12632 		return;
   12633 
   12634 	/* Enable PCS to turn on link */
   12635 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12636 	reg |= PCS_CFG_PCS_EN;
   12637 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12638 
   12639 	/* Power up the laser */
   12640 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12641 	reg &= ~CTRL_EXT_SWDPIN(3);
   12642 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12643 
   12644 	/* Flush the write to verify completion */
   12645 	CSR_WRITE_FLUSH(sc);
   12646 	delay(1000);
   12647 }
   12648 
   12649 static int
   12650 wm_serdes_mediachange(struct ifnet *ifp)
   12651 {
   12652 	struct wm_softc *sc = ifp->if_softc;
   12653 	bool pcs_autoneg = true; /* XXX */
   12654 	uint32_t ctrl_ext, pcs_lctl, reg;
   12655 
   12656 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12657 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12658 		return 0;
   12659 
   12660 	/* XXX Currently, this function is not called on 8257[12] */
   12661 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12662 	    || (sc->sc_type >= WM_T_82575))
   12663 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12664 
   12665 	/* Power on the sfp cage if present */
   12666 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12667 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12668 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12669 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12670 
   12671 	sc->sc_ctrl |= CTRL_SLU;
   12672 
   12673 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12674 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12675 
   12676 		reg = CSR_READ(sc, WMREG_CONNSW);
   12677 		reg |= CONNSW_ENRGSRC;
   12678 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12679 	}
   12680 
   12681 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12682 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12683 	case CTRL_EXT_LINK_MODE_SGMII:
   12684 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12685 		pcs_autoneg = true;
   12686 		/* Autoneg time out should be disabled for SGMII mode */
   12687 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12688 		break;
   12689 	case CTRL_EXT_LINK_MODE_1000KX:
   12690 		pcs_autoneg = false;
   12691 		/* FALLTHROUGH */
   12692 	default:
   12693 		if ((sc->sc_type == WM_T_82575)
   12694 		    || (sc->sc_type == WM_T_82576)) {
   12695 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12696 				pcs_autoneg = false;
   12697 		}
   12698 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12699 		    | CTRL_FRCFDX;
   12700 
   12701 		/* Set speed of 1000/Full if speed/duplex is forced */
   12702 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12703 	}
   12704 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12705 
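	/*
	 * Clear any stale autoneg and forced-link bits before applying
	 * either the autoneg or the forced configuration below.
	 */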
   12706 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12707 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12708 
   12709 	if (pcs_autoneg) {
   12710 		/* Set PCS register for autoneg */
   12711 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12712 
   12713 		/* Disable force flow control for autoneg */
   12714 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12715 
   12716 		/* Configure flow control advertisement for autoneg */
   12717 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12718 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12719 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12720 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12721 	} else
   12722 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12723 
   12724 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12725 
   12726 	return 0;
   12727 }
   12728 
   12729 static void
   12730 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12731 {
   12732 	struct wm_softc *sc = ifp->if_softc;
   12733 	struct mii_data *mii = &sc->sc_mii;
   12734 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12735 	uint32_t pcs_adv, pcs_lpab, reg;
   12736 
   12737 	ifmr->ifm_status = IFM_AVALID;
   12738 	ifmr->ifm_active = IFM_ETHER;
   12739 
   12740 	/* Check PCS */
   12741 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12742 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12743 		ifmr->ifm_active |= IFM_NONE;
   12744 		sc->sc_tbi_linkup = 0;
   12745 		goto setled;
   12746 	}
   12747 
   12748 	sc->sc_tbi_linkup = 1;
   12749 	ifmr->ifm_status |= IFM_ACTIVE;
   12750 	if (sc->sc_type == WM_T_I354) {
   12751 		uint32_t status;
   12752 
   12753 		status = CSR_READ(sc, WMREG_STATUS);
   12754 		if (((status & STATUS_2P5_SKU) != 0)
   12755 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12756 			ifmr->ifm_active |= IFM_2500_KX;
   12757 		} else
   12758 			ifmr->ifm_active |= IFM_1000_KX;
   12759 	} else {
   12760 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12761 		case PCS_LSTS_SPEED_10:
   12762 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12763 			break;
   12764 		case PCS_LSTS_SPEED_100:
   12765 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12766 			break;
   12767 		case PCS_LSTS_SPEED_1000:
   12768 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12769 			break;
   12770 		default:
   12771 			device_printf(sc->sc_dev, "Unknown speed\n");
   12772 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12773 			break;
   12774 		}
   12775 	}
   12776 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12777 	if ((reg & PCS_LSTS_FDX) != 0)
   12778 		ifmr->ifm_active |= IFM_FDX;
   12779 	else
   12780 		ifmr->ifm_active |= IFM_HDX;
   12781 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12782 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12783 		/* Check flow */
   12784 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12785 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12786 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12787 			goto setled;
   12788 		}
   12789 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12790 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12791 		DPRINTF(sc, WM_DEBUG_LINK,
   12792 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12793 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12794 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12795 			mii->mii_media_active |= IFM_FLOW
   12796 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12797 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12798 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12799 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12800 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12801 			mii->mii_media_active |= IFM_FLOW
   12802 			    | IFM_ETH_TXPAUSE;
   12803 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12804 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12805 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12806 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12807 			mii->mii_media_active |= IFM_FLOW
   12808 			    | IFM_ETH_RXPAUSE;
   12809 		}
   12810 	}
   12811 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12812 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12813 setled:
   12814 	wm_tbi_serdes_set_linkled(sc);
   12815 }
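          /*
           * For reference, the three-way test above implements the IEEE
           * 802.3 Annex 28B pause resolution (our summary; see the standard
           * for the authoritative table):
           *
           *	local adv	link partner	resolution
           *	SYM		SYM		TX and RX pause
           *	ASYM only	SYM + ASYM	TX pause only
           *	SYM + ASYM	ASYM only	RX pause only
           *	other		other		no pause
           */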
   12816 
   12817 /*
   12818  * wm_serdes_tick:
   12819  *
   12820  *	Check the link on serdes devices.
   12821  */
   12822 static void
   12823 wm_serdes_tick(struct wm_softc *sc)
   12824 {
   12825 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12826 	struct mii_data *mii = &sc->sc_mii;
   12827 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12828 	uint32_t reg;
   12829 
   12830 	KASSERT(WM_CORE_LOCKED(sc));
   12831 
   12832 	mii->mii_media_status = IFM_AVALID;
   12833 	mii->mii_media_active = IFM_ETHER;
   12834 
   12835 	/* Check PCS */
   12836 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12837 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12838 		mii->mii_media_status |= IFM_ACTIVE;
   12839 		sc->sc_tbi_linkup = 1;
   12840 		sc->sc_tbi_serdes_ticks = 0;
   12841 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12842 		if ((reg & PCS_LSTS_FDX) != 0)
   12843 			mii->mii_media_active |= IFM_FDX;
   12844 		else
   12845 			mii->mii_media_active |= IFM_HDX;
   12846 	} else {
    12847 		mii->mii_media_active |= IFM_NONE;
   12848 		sc->sc_tbi_linkup = 0;
   12849 		/* If the timer expired, retry autonegotiation */
   12850 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12851 		    && (++sc->sc_tbi_serdes_ticks
   12852 			>= sc->sc_tbi_serdes_anegticks)) {
   12853 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12854 				device_xname(sc->sc_dev), __func__));
   12855 			sc->sc_tbi_serdes_ticks = 0;
   12856 			/* XXX */
   12857 			wm_serdes_mediachange(ifp);
   12858 		}
   12859 	}
   12860 
   12861 	wm_tbi_serdes_set_linkled(sc);
   12862 }
   12863 
   12864 /* SFP related */
   12865 
   12866 static int
   12867 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12868 {
   12869 	uint32_t i2ccmd;
   12870 	int i;
   12871 
   12872 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12873 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12874 
   12875 	/* Poll the ready bit */
   12876 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12877 		delay(50);
   12878 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12879 		if (i2ccmd & I2CCMD_READY)
   12880 			break;
   12881 	}
   12882 	if ((i2ccmd & I2CCMD_READY) == 0)
   12883 		return -1;
   12884 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12885 		return -1;
   12886 
   12887 	*data = i2ccmd & 0x00ff;
   12888 
   12889 	return 0;
   12890 }
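          /*
           * Illustrative only (not compiled): a minimal sketch of using
           * wm_sfp_read_data_byte() to dump the SFP vendor name.  The byte
           * offsets (20-35 of the ID EEPROM) are assumptions taken from
           * SFF-8472, not constants defined by this driver.
           */
          #if 0
          static void
          wm_sfp_dump_vendor_name(struct wm_softc *sc)
          {
          	char name[17];
          	uint8_t byte;
          	int i;
          
          	for (i = 0; i < 16; i++) {
          		/* Vendor name lives at bytes 20..35 per SFF-8472. */
          		if (wm_sfp_read_data_byte(sc, 20 + i, &byte) != 0)
          			return;
          		name[i] = (char)byte;
          	}
          	name[16] = '\0';
          	device_printf(sc->sc_dev, "SFP vendor: %s\n", name);
          }
          #endif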
   12891 
   12892 static uint32_t
   12893 wm_sfp_get_media_type(struct wm_softc *sc)
   12894 {
   12895 	uint32_t ctrl_ext;
   12896 	uint8_t val = 0;
   12897 	int timeout = 3;
   12898 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12899 	int rv = -1;
   12900 
   12901 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12902 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12903 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12904 	CSR_WRITE_FLUSH(sc);
   12905 
   12906 	/* Read SFP module data */
   12907 	while (timeout) {
   12908 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12909 		if (rv == 0)
   12910 			break;
   12911 		delay(100*1000); /* XXX too big */
   12912 		timeout--;
   12913 	}
   12914 	if (rv != 0)
   12915 		goto out;
   12916 
   12917 	switch (val) {
   12918 	case SFF_SFP_ID_SFF:
   12919 		aprint_normal_dev(sc->sc_dev,
   12920 		    "Module/Connector soldered to board\n");
   12921 		break;
   12922 	case SFF_SFP_ID_SFP:
   12923 		sc->sc_flags |= WM_F_SFP;
   12924 		break;
   12925 	case SFF_SFP_ID_UNKNOWN:
   12926 		goto out;
   12927 	default:
   12928 		break;
   12929 	}
   12930 
   12931 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12932 	if (rv != 0)
   12933 		goto out;
   12934 
   12935 	sc->sc_sfptype = val;
   12936 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12937 		mediatype = WM_MEDIATYPE_SERDES;
   12938 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12939 		sc->sc_flags |= WM_F_SGMII;
   12940 		mediatype = WM_MEDIATYPE_COPPER;
   12941 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12942 		sc->sc_flags |= WM_F_SGMII;
   12943 		mediatype = WM_MEDIATYPE_SERDES;
   12944 	} else {
   12945 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12946 		    __func__, sc->sc_sfptype);
   12947 		sc->sc_sfptype = 0; /* XXX unknown */
   12948 	}
   12949 
   12950 out:
   12951 	/* Restore I2C interface setting */
   12952 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12953 
   12954 	return mediatype;
   12955 }
   12956 
   12957 /*
   12958  * NVM related.
   12959  * Microwire, SPI (w/wo EERD) and Flash.
   12960  */
   12961 
   12962 /* Both spi and uwire */
   12963 
   12964 /*
   12965  * wm_eeprom_sendbits:
   12966  *
   12967  *	Send a series of bits to the EEPROM.
   12968  */
   12969 static void
   12970 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12971 {
   12972 	uint32_t reg;
   12973 	int x;
   12974 
   12975 	reg = CSR_READ(sc, WMREG_EECD);
   12976 
   12977 	for (x = nbits; x > 0; x--) {
   12978 		if (bits & (1U << (x - 1)))
   12979 			reg |= EECD_DI;
   12980 		else
   12981 			reg &= ~EECD_DI;
   12982 		CSR_WRITE(sc, WMREG_EECD, reg);
   12983 		CSR_WRITE_FLUSH(sc);
   12984 		delay(2);
   12985 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12986 		CSR_WRITE_FLUSH(sc);
   12987 		delay(2);
   12988 		CSR_WRITE(sc, WMREG_EECD, reg);
   12989 		CSR_WRITE_FLUSH(sc);
   12990 		delay(2);
   12991 	}
   12992 }
   12993 
   12994 /*
   12995  * wm_eeprom_recvbits:
   12996  *
   12997  *	Receive a series of bits from the EEPROM.
   12998  */
   12999 static void
   13000 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13001 {
   13002 	uint32_t reg, val;
   13003 	int x;
   13004 
   13005 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13006 
   13007 	val = 0;
   13008 	for (x = nbits; x > 0; x--) {
   13009 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13010 		CSR_WRITE_FLUSH(sc);
   13011 		delay(2);
   13012 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13013 			val |= (1U << (x - 1));
   13014 		CSR_WRITE(sc, WMREG_EECD, reg);
   13015 		CSR_WRITE_FLUSH(sc);
   13016 		delay(2);
   13017 	}
   13018 	*valp = val;
   13019 }
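          /*
           * Worked example of the MSB-first bit order used by both helpers
           * (assuming UWIRE_OPC_READ is 0x6, the standard 93Cxx "110" read
           * opcode): sending it as 3 bits clocks DI through 1, 1, 0, and
           * receiving 16 bits places the first bit seen on DO into bit 15
           * of *valp.
           */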
   13020 
   13021 /* Microwire */
   13022 
   13023 /*
   13024  * wm_nvm_read_uwire:
   13025  *
   13026  *	Read a word from the EEPROM using the MicroWire protocol.
   13027  */
   13028 static int
   13029 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13030 {
   13031 	uint32_t reg, val;
   13032 	int i;
   13033 
   13034 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13035 		device_xname(sc->sc_dev), __func__));
   13036 
   13037 	if (sc->nvm.acquire(sc) != 0)
   13038 		return -1;
   13039 
   13040 	for (i = 0; i < wordcnt; i++) {
   13041 		/* Clear SK and DI. */
   13042 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13043 		CSR_WRITE(sc, WMREG_EECD, reg);
   13044 
   13045 		/*
   13046 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13047 		 * and Xen.
   13048 		 *
   13049 		 * We use this workaround only for 82540 because qemu's
    13050 		 * e1000 acts as an 82540.
   13051 		 */
   13052 		if (sc->sc_type == WM_T_82540) {
   13053 			reg |= EECD_SK;
   13054 			CSR_WRITE(sc, WMREG_EECD, reg);
   13055 			reg &= ~EECD_SK;
   13056 			CSR_WRITE(sc, WMREG_EECD, reg);
   13057 			CSR_WRITE_FLUSH(sc);
   13058 			delay(2);
   13059 		}
   13060 		/* XXX: end of workaround */
   13061 
   13062 		/* Set CHIP SELECT. */
   13063 		reg |= EECD_CS;
   13064 		CSR_WRITE(sc, WMREG_EECD, reg);
   13065 		CSR_WRITE_FLUSH(sc);
   13066 		delay(2);
   13067 
   13068 		/* Shift in the READ command. */
   13069 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13070 
   13071 		/* Shift in address. */
   13072 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13073 
   13074 		/* Shift out the data. */
   13075 		wm_eeprom_recvbits(sc, &val, 16);
   13076 		data[i] = val & 0xffff;
   13077 
   13078 		/* Clear CHIP SELECT. */
   13079 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13080 		CSR_WRITE(sc, WMREG_EECD, reg);
   13081 		CSR_WRITE_FLUSH(sc);
   13082 		delay(2);
   13083 	}
   13084 
   13085 	sc->nvm.release(sc);
   13086 	return 0;
   13087 }
   13088 
   13089 /* SPI */
   13090 
   13091 /*
   13092  * Set SPI and FLASH related information from the EECD register.
   13093  * For 82541 and 82547, the word size is taken from EEPROM.
   13094  */
   13095 static int
   13096 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13097 {
   13098 	int size;
   13099 	uint32_t reg;
   13100 	uint16_t data;
   13101 
   13102 	reg = CSR_READ(sc, WMREG_EECD);
   13103 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13104 
   13105 	/* Read the size of NVM from EECD by default */
   13106 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13107 	switch (sc->sc_type) {
   13108 	case WM_T_82541:
   13109 	case WM_T_82541_2:
   13110 	case WM_T_82547:
   13111 	case WM_T_82547_2:
   13112 		/* Set dummy value to access EEPROM */
   13113 		sc->sc_nvm_wordsize = 64;
   13114 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13115 			aprint_error_dev(sc->sc_dev,
   13116 			    "%s: failed to read EEPROM size\n", __func__);
   13117 		}
   13118 		reg = data;
   13119 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13120 		if (size == 0)
    13121 			size = 6; /* 64 words */
   13122 		else
   13123 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13124 		break;
   13125 	case WM_T_80003:
   13126 	case WM_T_82571:
   13127 	case WM_T_82572:
   13128 	case WM_T_82573: /* SPI case */
   13129 	case WM_T_82574: /* SPI case */
   13130 	case WM_T_82583: /* SPI case */
   13131 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13132 		if (size > 14)
   13133 			size = 14;
   13134 		break;
   13135 	case WM_T_82575:
   13136 	case WM_T_82576:
   13137 	case WM_T_82580:
   13138 	case WM_T_I350:
   13139 	case WM_T_I354:
   13140 	case WM_T_I210:
   13141 	case WM_T_I211:
   13142 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13143 		if (size > 15)
   13144 			size = 15;
   13145 		break;
   13146 	default:
   13147 		aprint_error_dev(sc->sc_dev,
   13148 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13149 		return -1;
   13151 	}
   13152 
   13153 	sc->sc_nvm_wordsize = 1 << size;
   13154 
   13155 	return 0;
   13156 }
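          /*
           * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in
           * the e1000 register layout): an 82571 whose EECD size field
           * reads 2 yields size = 2 + 6 = 8, so sc_nvm_wordsize becomes
           * 1 << 8 = 256 words, i.e. a 512-byte EEPROM.
           */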
   13157 
   13158 /*
   13159  * wm_nvm_ready_spi:
   13160  *
   13161  *	Wait for a SPI EEPROM to be ready for commands.
   13162  */
   13163 static int
   13164 wm_nvm_ready_spi(struct wm_softc *sc)
   13165 {
   13166 	uint32_t val;
   13167 	int usec;
   13168 
   13169 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13170 		device_xname(sc->sc_dev), __func__));
   13171 
   13172 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13173 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13174 		wm_eeprom_recvbits(sc, &val, 8);
   13175 		if ((val & SPI_SR_RDY) == 0)
   13176 			break;
   13177 	}
   13178 	if (usec >= SPI_MAX_RETRIES) {
    13179 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   13180 		return -1;
   13181 	}
   13182 	return 0;
   13183 }
   13184 
   13185 /*
   13186  * wm_nvm_read_spi:
   13187  *
    13188  *	Read a word from the EEPROM using the SPI protocol.
   13189  */
   13190 static int
   13191 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13192 {
   13193 	uint32_t reg, val;
   13194 	int i;
   13195 	uint8_t opc;
   13196 	int rv = 0;
   13197 
   13198 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13199 		device_xname(sc->sc_dev), __func__));
   13200 
   13201 	if (sc->nvm.acquire(sc) != 0)
   13202 		return -1;
   13203 
   13204 	/* Clear SK and CS. */
   13205 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13206 	CSR_WRITE(sc, WMREG_EECD, reg);
   13207 	CSR_WRITE_FLUSH(sc);
   13208 	delay(2);
   13209 
   13210 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13211 		goto out;
   13212 
   13213 	/* Toggle CS to flush commands. */
   13214 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13215 	CSR_WRITE_FLUSH(sc);
   13216 	delay(2);
   13217 	CSR_WRITE(sc, WMREG_EECD, reg);
   13218 	CSR_WRITE_FLUSH(sc);
   13219 	delay(2);
   13220 
   13221 	opc = SPI_OPC_READ;
   13222 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13223 		opc |= SPI_OPC_A8;
   13224 
   13225 	wm_eeprom_sendbits(sc, opc, 8);
   13226 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13227 
   13228 	for (i = 0; i < wordcnt; i++) {
   13229 		wm_eeprom_recvbits(sc, &val, 16);
   13230 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13231 	}
   13232 
   13233 	/* Raise CS and clear SK. */
   13234 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13235 	CSR_WRITE(sc, WMREG_EECD, reg);
   13236 	CSR_WRITE_FLUSH(sc);
   13237 	delay(2);
   13238 
   13239 out:
   13240 	sc->nvm.release(sc);
   13241 	return rv;
   13242 }
   13243 
    13244 /* Read using the EERD register */
   13245 
   13246 static int
   13247 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13248 {
   13249 	uint32_t attempts = 100000;
   13250 	uint32_t i, reg = 0;
   13251 	int32_t done = -1;
   13252 
   13253 	for (i = 0; i < attempts; i++) {
   13254 		reg = CSR_READ(sc, rw);
   13255 
   13256 		if (reg & EERD_DONE) {
   13257 			done = 0;
   13258 			break;
   13259 		}
   13260 		delay(5);
   13261 	}
   13262 
   13263 	return done;
   13264 }
   13265 
   13266 static int
   13267 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13268 {
   13269 	int i, eerd = 0;
   13270 	int rv = 0;
   13271 
   13272 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13273 		device_xname(sc->sc_dev), __func__));
   13274 
   13275 	if (sc->nvm.acquire(sc) != 0)
   13276 		return -1;
   13277 
   13278 	for (i = 0; i < wordcnt; i++) {
   13279 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13280 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13281 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13282 		if (rv != 0) {
   13283 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13284 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13285 			break;
   13286 		}
   13287 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13288 	}
   13289 
   13290 	sc->nvm.release(sc);
   13291 	return rv;
   13292 }
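          /*
           * Illustrative only (not compiled): a minimal sketch, inside a
           * function body, of reading the three Ethernet-address words
           * through the EERD interface, assuming the station address sits
           * at NVM_OFF_MACADDR as elsewhere in this driver.
           */
          #if 0
          	uint16_t myea[3];
          
          	if (wm_nvm_read_eerd(sc, NVM_OFF_MACADDR, 3, myea) == 0)
          		aprint_normal_dev(sc->sc_dev,
          		    "Ethernet address words: %04x %04x %04x\n",
          		    myea[0], myea[1], myea[2]);
          #endif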
   13293 
   13294 /* Flash */
   13295 
   13296 static int
   13297 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13298 {
   13299 	uint32_t eecd;
   13300 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13301 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13302 	uint32_t nvm_dword = 0;
   13303 	uint8_t sig_byte = 0;
   13304 	int rv;
   13305 
   13306 	switch (sc->sc_type) {
   13307 	case WM_T_PCH_SPT:
   13308 	case WM_T_PCH_CNP:
   13309 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13310 		act_offset = ICH_NVM_SIG_WORD * 2;
   13311 
   13312 		/* Set bank to 0 in case flash read fails. */
   13313 		*bank = 0;
   13314 
   13315 		/* Check bank 0 */
   13316 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13317 		if (rv != 0)
   13318 			return rv;
   13319 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13320 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13321 			*bank = 0;
   13322 			return 0;
   13323 		}
   13324 
   13325 		/* Check bank 1 */
    13326 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13327 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
   13328 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13329 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13330 			*bank = 1;
   13331 			return 0;
   13332 		}
   13333 		aprint_error_dev(sc->sc_dev,
   13334 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13335 		return -1;
   13336 	case WM_T_ICH8:
   13337 	case WM_T_ICH9:
   13338 		eecd = CSR_READ(sc, WMREG_EECD);
   13339 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13340 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13341 			return 0;
   13342 		}
   13343 		/* FALLTHROUGH */
   13344 	default:
   13345 		/* Default to 0 */
   13346 		*bank = 0;
   13347 
   13348 		/* Check bank 0 */
   13349 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13350 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13351 			*bank = 0;
   13352 			return 0;
   13353 		}
   13354 
   13355 		/* Check bank 1 */
   13356 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13357 		    &sig_byte);
   13358 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13359 			*bank = 1;
   13360 			return 0;
   13361 		}
   13362 	}
   13363 
   13364 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13365 		device_xname(sc->sc_dev)));
   13366 	return -1;
   13367 }
   13368 
   13369 /******************************************************************************
   13370  * This function does initial flash setup so that a new read/write/erase cycle
   13371  * can be started.
   13372  *
   13373  * sc - The pointer to the hw structure
   13374  ****************************************************************************/
   13375 static int32_t
   13376 wm_ich8_cycle_init(struct wm_softc *sc)
   13377 {
   13378 	uint16_t hsfsts;
   13379 	int32_t error = 1;
   13380 	int32_t i     = 0;
   13381 
   13382 	if (sc->sc_type >= WM_T_PCH_SPT)
   13383 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13384 	else
   13385 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13386 
    13387 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   13388 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13389 		return error;
   13390 
    13391 	/* Clear FCERR and DAEL in HW status by writing 1s */
   13393 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13394 
   13395 	if (sc->sc_type >= WM_T_PCH_SPT)
   13396 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13397 	else
   13398 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13399 
    13400 	/*
    13401 	 * Ideally the hardware would provide either a cycle-in-progress
    13402 	 * bit to check before starting a new cycle, or an FDONE bit that
    13403 	 * reads as 1 after hardware reset, which could then indicate
    13404 	 * whether a cycle is in progress or has completed.  There should
    13405 	 * also be a software semaphore guarding FDONE (or the in-progress
    13406 	 * bit) so that accesses from two threads are serialized and two
    13407 	 * threads cannot start a cycle at the same time.
    13408 	 */
   13410 
   13411 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13412 		/*
   13413 		 * There is no cycle running at present, so we can start a
   13414 		 * cycle
   13415 		 */
   13416 
   13417 		/* Begin by setting Flash Cycle Done. */
   13418 		hsfsts |= HSFSTS_DONE;
   13419 		if (sc->sc_type >= WM_T_PCH_SPT)
   13420 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13421 			    hsfsts & 0xffffUL);
   13422 		else
   13423 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13424 		error = 0;
   13425 	} else {
   13426 		/*
    13427 		 * Otherwise poll for some time so the current cycle has a
   13428 		 * chance to end before giving up.
   13429 		 */
   13430 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13431 			if (sc->sc_type >= WM_T_PCH_SPT)
   13432 				hsfsts = ICH8_FLASH_READ32(sc,
   13433 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13434 			else
   13435 				hsfsts = ICH8_FLASH_READ16(sc,
   13436 				    ICH_FLASH_HSFSTS);
   13437 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13438 				error = 0;
   13439 				break;
   13440 			}
   13441 			delay(1);
   13442 		}
   13443 		if (error == 0) {
   13444 			/*
    13445 			 * The previous cycle completed before we timed
    13446 			 * out, so now set the Flash Cycle Done bit.
   13447 			 */
   13448 			hsfsts |= HSFSTS_DONE;
   13449 			if (sc->sc_type >= WM_T_PCH_SPT)
   13450 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13451 				    hsfsts & 0xffffUL);
   13452 			else
   13453 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13454 				    hsfsts);
   13455 		}
   13456 	}
   13457 	return error;
   13458 }
   13459 
   13460 /******************************************************************************
   13461  * This function starts a flash cycle and waits for its completion
   13462  *
   13463  * sc - The pointer to the hw structure
   13464  ****************************************************************************/
   13465 static int32_t
   13466 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13467 {
   13468 	uint16_t hsflctl;
   13469 	uint16_t hsfsts;
   13470 	int32_t error = 1;
   13471 	uint32_t i = 0;
   13472 
   13473 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13474 	if (sc->sc_type >= WM_T_PCH_SPT)
   13475 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13476 	else
   13477 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13478 	hsflctl |= HSFCTL_GO;
   13479 	if (sc->sc_type >= WM_T_PCH_SPT)
   13480 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13481 		    (uint32_t)hsflctl << 16);
   13482 	else
   13483 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13484 
   13485 	/* Wait till FDONE bit is set to 1 */
   13486 	do {
   13487 		if (sc->sc_type >= WM_T_PCH_SPT)
   13488 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13489 			    & 0xffffUL;
   13490 		else
   13491 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13492 		if (hsfsts & HSFSTS_DONE)
   13493 			break;
   13494 		delay(1);
   13495 		i++;
   13496 	} while (i < timeout);
    13497 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13498 		error = 0;
   13499 
   13500 	return error;
   13501 }
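          /*
           * Layout note on the access pattern above (an inference from the
           * shifts used here, matching Intel's PCH documentation): on SPT
           * and newer, the 16-bit HSFSTS and HSFCTL registers live in one
           * 32-bit register in LAN memory space, HSFSTS in bits 15:0 and
           * HSFCTL in bits 31:16, hence "READ32() >> 16" to fetch HSFCTL
           * and "WRITE32(hsflctl << 16)" to store it.
           */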
   13502 
   13503 /******************************************************************************
   13504  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13505  *
   13506  * sc - The pointer to the hw structure
   13507  * index - The index of the byte or word to read.
    13508  * size - Size of data to read, 1=byte, 2=word, 4=dword
   13509  * data - Pointer to the word to store the value read.
   13510  *****************************************************************************/
   13511 static int32_t
   13512 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13513     uint32_t size, uint32_t *data)
   13514 {
   13515 	uint16_t hsfsts;
   13516 	uint16_t hsflctl;
   13517 	uint32_t flash_linear_address;
   13518 	uint32_t flash_data = 0;
   13519 	int32_t error = 1;
   13520 	int32_t count = 0;
   13521 
    13522 	if (size < 1 || size > 4 || data == NULL ||
   13523 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13524 		return error;
   13525 
   13526 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13527 	    sc->sc_ich8_flash_base;
   13528 
   13529 	do {
   13530 		delay(1);
   13531 		/* Steps */
   13532 		error = wm_ich8_cycle_init(sc);
   13533 		if (error)
   13534 			break;
   13535 
   13536 		if (sc->sc_type >= WM_T_PCH_SPT)
   13537 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13538 			    >> 16;
   13539 		else
   13540 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13541 		/* The byte count field holds size - 1 (0 = 1 byte, 3 = 4 bytes) */
   13542 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13543 		    & HSFCTL_BCOUNT_MASK;
   13544 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13545 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13546 			/*
    13547 			 * On SPT, this register is in LAN memory space, not
    13548 			 * flash.  Therefore, only 32-bit access is supported.
   13549 			 */
   13550 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13551 			    (uint32_t)hsflctl << 16);
   13552 		} else
   13553 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13554 
   13555 		/*
   13556 		 * Write the last 24 bits of index into Flash Linear address
   13557 		 * field in Flash Address
   13558 		 */
   13559 		/* TODO: TBD maybe check the index against the size of flash */
   13560 
   13561 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13562 
   13563 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13564 
    13565 		/*
    13566 		 * If FCERR is set, clear it and retry the whole sequence
    13567 		 * a few more times; otherwise read the result out of
    13568 		 * Flash Data0, least significant byte first.
    13569 		 */
   13571 		if (error == 0) {
   13572 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13573 			if (size == 1)
   13574 				*data = (uint8_t)(flash_data & 0x000000FF);
   13575 			else if (size == 2)
   13576 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13577 			else if (size == 4)
   13578 				*data = (uint32_t)flash_data;
   13579 			break;
   13580 		} else {
   13581 			/*
   13582 			 * If we've gotten here, then things are probably
   13583 			 * completely hosed, but if the error condition is
   13584 			 * detected, it won't hurt to give it another try...
   13585 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13586 			 */
   13587 			if (sc->sc_type >= WM_T_PCH_SPT)
   13588 				hsfsts = ICH8_FLASH_READ32(sc,
   13589 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13590 			else
   13591 				hsfsts = ICH8_FLASH_READ16(sc,
   13592 				    ICH_FLASH_HSFSTS);
   13593 
   13594 			if (hsfsts & HSFSTS_ERR) {
   13595 				/* Repeat for some time before giving up. */
   13596 				continue;
   13597 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13598 				break;
   13599 		}
   13600 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13601 
   13602 	return error;
   13603 }
   13604 
   13605 /******************************************************************************
   13606  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13607  *
    13608  * sc - pointer to the wm_softc structure
   13609  * index - The index of the byte to read.
   13610  * data - Pointer to a byte to store the value read.
   13611  *****************************************************************************/
   13612 static int32_t
   13613 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13614 {
   13615 	int32_t status;
   13616 	uint32_t word = 0;
   13617 
   13618 	status = wm_read_ich8_data(sc, index, 1, &word);
   13619 	if (status == 0)
   13620 		*data = (uint8_t)word;
   13621 	else
   13622 		*data = 0;
   13623 
   13624 	return status;
   13625 }
   13626 
   13627 /******************************************************************************
   13628  * Reads a word from the NVM using the ICH8 flash access registers.
   13629  *
    13630  * sc - pointer to the wm_softc structure
   13631  * index - The starting byte index of the word to read.
   13632  * data - Pointer to a word to store the value read.
   13633  *****************************************************************************/
   13634 static int32_t
   13635 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13636 {
   13637 	int32_t status;
   13638 	uint32_t word = 0;
   13639 
   13640 	status = wm_read_ich8_data(sc, index, 2, &word);
   13641 	if (status == 0)
   13642 		*data = (uint16_t)word;
   13643 	else
   13644 		*data = 0;
   13645 
   13646 	return status;
   13647 }
   13648 
   13649 /******************************************************************************
   13650  * Reads a dword from the NVM using the ICH8 flash access registers.
   13651  *
    13652  * sc - pointer to the wm_softc structure
    13653  * index - The starting byte index of the dword to read.
    13654  * data - Pointer to a dword to store the value read.
   13655  *****************************************************************************/
   13656 static int32_t
   13657 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13658 {
   13659 	int32_t status;
   13660 
   13661 	status = wm_read_ich8_data(sc, index, 4, data);
   13662 	return status;
   13663 }
   13664 
   13665 /******************************************************************************
   13666  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13667  * register.
   13668  *
   13669  * sc - Struct containing variables accessed by shared code
   13670  * offset - offset of word in the EEPROM to read
   13671  * data - word read from the EEPROM
   13672  * words - number of words to read
   13673  *****************************************************************************/
   13674 static int
   13675 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13676 {
   13677 	int32_t	 rv = 0;
   13678 	uint32_t flash_bank = 0;
   13679 	uint32_t act_offset = 0;
   13680 	uint32_t bank_offset = 0;
   13681 	uint16_t word = 0;
   13682 	uint16_t i = 0;
   13683 
   13684 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13685 		device_xname(sc->sc_dev), __func__));
   13686 
   13687 	if (sc->nvm.acquire(sc) != 0)
   13688 		return -1;
   13689 
   13690 	/*
   13691 	 * We need to know which is the valid flash bank.  In the event
   13692 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13693 	 * managing flash_bank. So it cannot be trusted and needs
   13694 	 * to be updated with each read.
   13695 	 */
   13696 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13697 	if (rv) {
   13698 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13699 			device_xname(sc->sc_dev)));
   13700 		flash_bank = 0;
   13701 	}
   13702 
   13703 	/*
   13704 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13705 	 * size
   13706 	 */
   13707 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13708 
   13709 	for (i = 0; i < words; i++) {
   13710 		/* The NVM part needs a byte offset, hence * 2 */
   13711 		act_offset = bank_offset + ((offset + i) * 2);
   13712 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13713 		if (rv) {
   13714 			aprint_error_dev(sc->sc_dev,
   13715 			    "%s: failed to read NVM\n", __func__);
   13716 			break;
   13717 		}
   13718 		data[i] = word;
   13719 	}
   13720 
   13721 	sc->nvm.release(sc);
   13722 	return rv;
   13723 }
   13724 
   13725 /******************************************************************************
   13726  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13727  * register.
   13728  *
   13729  * sc - Struct containing variables accessed by shared code
   13730  * offset - offset of word in the EEPROM to read
   13731  * data - word read from the EEPROM
   13732  * words - number of words to read
   13733  *****************************************************************************/
   13734 static int
   13735 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13736 {
   13737 	int32_t	 rv = 0;
   13738 	uint32_t flash_bank = 0;
   13739 	uint32_t act_offset = 0;
   13740 	uint32_t bank_offset = 0;
   13741 	uint32_t dword = 0;
   13742 	uint16_t i = 0;
   13743 
   13744 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13745 		device_xname(sc->sc_dev), __func__));
   13746 
   13747 	if (sc->nvm.acquire(sc) != 0)
   13748 		return -1;
   13749 
   13750 	/*
   13751 	 * We need to know which is the valid flash bank.  In the event
   13752 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13753 	 * managing flash_bank. So it cannot be trusted and needs
   13754 	 * to be updated with each read.
   13755 	 */
   13756 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13757 	if (rv) {
   13758 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13759 			device_xname(sc->sc_dev)));
   13760 		flash_bank = 0;
   13761 	}
   13762 
   13763 	/*
   13764 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13765 	 * size
   13766 	 */
   13767 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13768 
   13769 	for (i = 0; i < words; i++) {
   13770 		/* The NVM part needs a byte offset, hence * 2 */
   13771 		act_offset = bank_offset + ((offset + i) * 2);
   13772 		/* but we must read dword aligned, so mask ... */
   13773 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13774 		if (rv) {
   13775 			aprint_error_dev(sc->sc_dev,
   13776 			    "%s: failed to read NVM\n", __func__);
   13777 			break;
   13778 		}
   13779 		/* ... and pick out low or high word */
   13780 		if ((act_offset & 0x2) == 0)
   13781 			data[i] = (uint16_t)(dword & 0xFFFF);
   13782 		else
   13783 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13784 	}
   13785 
   13786 	sc->nvm.release(sc);
   13787 	return rv;
   13788 }
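          /*
           * Worked example of the alignment fix-up above: word offset 3 in
           * bank 0 gives act_offset = 6; masking with ~0x3 reads the dword
           * at byte offset 4, and since (act_offset & 0x2) != 0 the
           * caller's word is the high half (bits 31:16) of that dword.
           */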
   13789 
   13790 /* iNVM */
   13791 
   13792 static int
   13793 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13794 {
   13795 	int32_t	 rv = 0;
   13796 	uint32_t invm_dword;
   13797 	uint16_t i;
   13798 	uint8_t record_type, word_address;
   13799 
   13800 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13801 		device_xname(sc->sc_dev), __func__));
   13802 
   13803 	for (i = 0; i < INVM_SIZE; i++) {
   13804 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13805 		/* Get record type */
   13806 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13807 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13808 			break;
   13809 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13810 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13811 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13812 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13813 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13814 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13815 			if (word_address == address) {
   13816 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13817 				rv = 0;
   13818 				break;
   13819 			}
   13820 		}
   13821 	}
   13822 
   13823 	return rv;
   13824 }
   13825 
   13826 static int
   13827 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13828 {
   13829 	int rv = 0;
   13830 	int i;
   13831 
   13832 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13833 		device_xname(sc->sc_dev), __func__));
   13834 
   13835 	if (sc->nvm.acquire(sc) != 0)
   13836 		return -1;
   13837 
   13838 	for (i = 0; i < words; i++) {
   13839 		switch (offset + i) {
   13840 		case NVM_OFF_MACADDR:
   13841 		case NVM_OFF_MACADDR1:
   13842 		case NVM_OFF_MACADDR2:
   13843 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13844 			if (rv != 0) {
   13845 				data[i] = 0xffff;
   13846 				rv = -1;
   13847 			}
   13848 			break;
   13849 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13850 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13851 			if (rv != 0) {
   13852 				*data = INVM_DEFAULT_AL;
   13853 				rv = 0;
   13854 			}
   13855 			break;
   13856 		case NVM_OFF_CFG2:
   13857 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13858 			if (rv != 0) {
   13859 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13860 				rv = 0;
   13861 			}
   13862 			break;
   13863 		case NVM_OFF_CFG4:
   13864 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13865 			if (rv != 0) {
   13866 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13867 				rv = 0;
   13868 			}
   13869 			break;
   13870 		case NVM_OFF_LED_1_CFG:
   13871 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13872 			if (rv != 0) {
   13873 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13874 				rv = 0;
   13875 			}
   13876 			break;
   13877 		case NVM_OFF_LED_0_2_CFG:
   13878 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13879 			if (rv != 0) {
   13880 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13881 				rv = 0;
   13882 			}
   13883 			break;
   13884 		case NVM_OFF_ID_LED_SETTINGS:
   13885 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13886 			if (rv != 0) {
   13887 				*data = ID_LED_RESERVED_FFFF;
   13888 				rv = 0;
   13889 			}
   13890 			break;
   13891 		default:
   13892 			DPRINTF(sc, WM_DEBUG_NVM,
   13893 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13894 			*data = NVM_RESERVED_WORD;
   13895 			break;
   13896 		}
   13897 	}
   13898 
   13899 	sc->nvm.release(sc);
   13900 	return rv;
   13901 }
   13902 
    13903 /* Lock, detect NVM type, validate checksum, get version and read */
   13904 
   13905 static int
   13906 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13907 {
   13908 	uint32_t eecd = 0;
   13909 
   13910 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13911 	    || sc->sc_type == WM_T_82583) {
   13912 		eecd = CSR_READ(sc, WMREG_EECD);
   13913 
   13914 		/* Isolate bits 15 & 16 */
   13915 		eecd = ((eecd >> 15) & 0x03);
   13916 
   13917 		/* If both bits are set, device is Flash type */
   13918 		if (eecd == 0x03)
   13919 			return 0;
   13920 	}
   13921 	return 1;
   13922 }
   13923 
   13924 static int
   13925 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13926 {
   13927 	uint32_t eec;
   13928 
   13929 	eec = CSR_READ(sc, WMREG_EEC);
   13930 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13931 		return 1;
   13932 
   13933 	return 0;
   13934 }
   13935 
   13936 /*
   13937  * wm_nvm_validate_checksum
   13938  *
   13939  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13940  */
   13941 static int
   13942 wm_nvm_validate_checksum(struct wm_softc *sc)
   13943 {
   13944 	uint16_t checksum;
   13945 	uint16_t eeprom_data;
   13946 #ifdef WM_DEBUG
   13947 	uint16_t csum_wordaddr, valid_checksum;
   13948 #endif
   13949 	int i;
   13950 
   13951 	checksum = 0;
   13952 
   13953 	/* Don't check for I211 */
   13954 	if (sc->sc_type == WM_T_I211)
   13955 		return 0;
   13956 
   13957 #ifdef WM_DEBUG
   13958 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13959 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13960 		csum_wordaddr = NVM_OFF_COMPAT;
   13961 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13962 	} else {
   13963 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13964 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13965 	}
   13966 
   13967 	/* Dump EEPROM image for debug */
   13968 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13969 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13970 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13971 		/* XXX PCH_SPT? */
   13972 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13973 		if ((eeprom_data & valid_checksum) == 0)
   13974 			DPRINTF(sc, WM_DEBUG_NVM,
    13975 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   13976 				device_xname(sc->sc_dev), eeprom_data,
   13977 				    valid_checksum));
   13978 	}
   13979 
   13980 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   13981 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13982 		for (i = 0; i < NVM_SIZE; i++) {
   13983 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13984 				printf("XXXX ");
   13985 			else
   13986 				printf("%04hx ", eeprom_data);
   13987 			if (i % 8 == 7)
   13988 				printf("\n");
   13989 		}
   13990 	}
   13991 
   13992 #endif /* WM_DEBUG */
   13993 
   13994 	for (i = 0; i < NVM_SIZE; i++) {
   13995 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13996 			return 1;
   13997 		checksum += eeprom_data;
   13998 	}
   13999 
   14000 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14001 #ifdef WM_DEBUG
   14002 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14003 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14004 #endif
   14005 	}
   14006 
   14007 	return 0;
   14008 }
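          /*
           * Worked example (NVM_CHECKSUM is 0xBABA in the e1000 NVM
           * layout): the word at offset 0x3f is programmed so that the
           * 16-bit sum of words 0x00-0x3f wraps to exactly 0xBABA, so
           * corruption of any earlier word shows up above as a sum other
           * than NVM_CHECKSUM.
           */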
   14009 
   14010 static void
   14011 wm_nvm_version_invm(struct wm_softc *sc)
   14012 {
   14013 	uint32_t dword;
   14014 
   14015 	/*
    14016 	 * Linux's code to decode the version is very strange, so we
    14017 	 * don't follow that algorithm and just decode word 61 as the
    14018 	 * document describes.  Perhaps it's not perfect, though...
   14019 	 *
   14020 	 * Example:
   14021 	 *
   14022 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14023 	 */
   14024 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14025 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14026 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14027 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14028 }
   14029 
   14030 static void
   14031 wm_nvm_version(struct wm_softc *sc)
   14032 {
   14033 	uint16_t major, minor, build, patch;
   14034 	uint16_t uid0, uid1;
   14035 	uint16_t nvm_data;
   14036 	uint16_t off;
   14037 	bool check_version = false;
   14038 	bool check_optionrom = false;
   14039 	bool have_build = false;
   14040 	bool have_uid = true;
   14041 
   14042 	/*
   14043 	 * Version format:
   14044 	 *
   14045 	 * XYYZ
   14046 	 * X0YZ
   14047 	 * X0YY
   14048 	 *
   14049 	 * Example:
   14050 	 *
   14051 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14052 	 *	82571	0x50a6	5.10.6?
   14053 	 *	82572	0x506a	5.6.10?
   14054 	 *	82572EI	0x5069	5.6.9?
   14055 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14056 	 *		0x2013	2.1.3?
   14057 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14058 	 * ICH8+82567	0x0040	0.4.0?
   14059 	 * ICH9+82566	0x1040	1.4.0?
   14060 	 *ICH10+82567	0x0043	0.4.3?
   14061 	 *  PCH+82577	0x00c1	0.12.1?
   14062 	 * PCH2+82579	0x00d3	0.13.3?
   14063 	 *		0x00d4	0.13.4?
   14064 	 *  LPT+I218	0x0023	0.2.3?
   14065 	 *  SPT+I219	0x0084	0.8.4?
   14066 	 *  CNP+I219	0x0054	0.5.4?
   14067 	 */
   14068 
   14069 	/*
   14070 	 * XXX
   14071 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    14072 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14073 	 */
   14074 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14075 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14076 		have_uid = false;
   14077 
   14078 	switch (sc->sc_type) {
   14079 	case WM_T_82571:
   14080 	case WM_T_82572:
   14081 	case WM_T_82574:
   14082 	case WM_T_82583:
   14083 		check_version = true;
   14084 		check_optionrom = true;
   14085 		have_build = true;
   14086 		break;
   14087 	case WM_T_ICH8:
   14088 	case WM_T_ICH9:
   14089 	case WM_T_ICH10:
   14090 	case WM_T_PCH:
   14091 	case WM_T_PCH2:
   14092 	case WM_T_PCH_LPT:
   14093 	case WM_T_PCH_SPT:
   14094 	case WM_T_PCH_CNP:
   14095 		check_version = true;
   14096 		have_build = true;
   14097 		have_uid = false;
   14098 		break;
   14099 	case WM_T_82575:
   14100 	case WM_T_82576:
   14101 	case WM_T_82580:
   14102 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14103 			check_version = true;
   14104 		break;
   14105 	case WM_T_I211:
   14106 		wm_nvm_version_invm(sc);
   14107 		have_uid = false;
   14108 		goto printver;
   14109 	case WM_T_I210:
   14110 		if (!wm_nvm_flash_presence_i210(sc)) {
   14111 			wm_nvm_version_invm(sc);
   14112 			have_uid = false;
   14113 			goto printver;
   14114 		}
   14115 		/* FALLTHROUGH */
   14116 	case WM_T_I350:
   14117 	case WM_T_I354:
   14118 		check_version = true;
   14119 		check_optionrom = true;
   14120 		break;
   14121 	default:
   14122 		return;
   14123 	}
   14124 	if (check_version
   14125 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14126 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14127 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14128 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14129 			build = nvm_data & NVM_BUILD_MASK;
   14130 			have_build = true;
   14131 		} else
   14132 			minor = nvm_data & 0x00ff;
   14133 
   14134 		/* Decimal */
    14135 		/* Convert BCD minor to decimal */
   14136 		sc->sc_nvm_ver_major = major;
   14137 		sc->sc_nvm_ver_minor = minor;
   14138 
   14139 printver:
   14140 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14141 		    sc->sc_nvm_ver_minor);
   14142 		if (have_build) {
   14143 			sc->sc_nvm_ver_build = build;
   14144 			aprint_verbose(".%d", build);
   14145 		}
   14146 	}
   14147 
    14148 	/* Assume the Option ROM area is above NVM_SIZE */
   14149 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14150 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14151 		/* Option ROM Version */
   14152 		if ((off != 0x0000) && (off != 0xffff)) {
   14153 			int rv;
   14154 
   14155 			off += NVM_COMBO_VER_OFF;
   14156 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14157 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14158 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14159 			    && (uid1 != 0) && (uid1 != 0xffff)) {
    14160 				/* 16 bits */
   14161 				major = uid0 >> 8;
   14162 				build = (uid0 << 8) | (uid1 >> 8);
   14163 				patch = uid1 & 0x00ff;
   14164 				aprint_verbose(", option ROM Version %d.%d.%d",
   14165 				    major, build, patch);
   14166 			}
   14167 		}
   14168 	}
   14169 
   14170 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14171 		aprint_verbose(", Image Unique ID %08x",
   14172 		    ((uint32_t)uid1 << 16) | uid0);
   14173 }
   14174 
   14175 /*
   14176  * wm_nvm_read:
   14177  *
   14178  *	Read data from the serial EEPROM.
   14179  */
   14180 static int
   14181 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14182 {
   14183 	int rv;
   14184 
   14185 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14186 		device_xname(sc->sc_dev), __func__));
   14187 
   14188 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14189 		return -1;
   14190 
   14191 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14192 
   14193 	return rv;
   14194 }
   14195 
   14196 /*
   14197  * Hardware semaphores.
    14198  * Very complex...
   14199  */
   14200 
   14201 static int
   14202 wm_get_null(struct wm_softc *sc)
   14203 {
   14204 
   14205 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14206 		device_xname(sc->sc_dev), __func__));
   14207 	return 0;
   14208 }
   14209 
   14210 static void
   14211 wm_put_null(struct wm_softc *sc)
   14212 {
   14213 
   14214 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14215 		device_xname(sc->sc_dev), __func__));
   14216 	return;
   14217 }
   14218 
   14219 static int
   14220 wm_get_eecd(struct wm_softc *sc)
   14221 {
   14222 	uint32_t reg;
   14223 	int x;
   14224 
   14225 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14226 		device_xname(sc->sc_dev), __func__));
   14227 
   14228 	reg = CSR_READ(sc, WMREG_EECD);
   14229 
   14230 	/* Request EEPROM access. */
   14231 	reg |= EECD_EE_REQ;
   14232 	CSR_WRITE(sc, WMREG_EECD, reg);
   14233 
    14234 	/* ...and wait for it to be granted. */
   14235 	for (x = 0; x < 1000; x++) {
   14236 		reg = CSR_READ(sc, WMREG_EECD);
   14237 		if (reg & EECD_EE_GNT)
   14238 			break;
   14239 		delay(5);
   14240 	}
   14241 	if ((reg & EECD_EE_GNT) == 0) {
   14242 		aprint_error_dev(sc->sc_dev,
   14243 		    "could not acquire EEPROM GNT\n");
   14244 		reg &= ~EECD_EE_REQ;
   14245 		CSR_WRITE(sc, WMREG_EECD, reg);
   14246 		return -1;
   14247 	}
   14248 
   14249 	return 0;
   14250 }
   14251 
   14252 static void
   14253 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14254 {
   14255 
   14256 	*eecd |= EECD_SK;
   14257 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14258 	CSR_WRITE_FLUSH(sc);
   14259 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14260 		delay(1);
   14261 	else
   14262 		delay(50);
   14263 }
   14264 
   14265 static void
   14266 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14267 {
   14268 
   14269 	*eecd &= ~EECD_SK;
   14270 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14271 	CSR_WRITE_FLUSH(sc);
   14272 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14273 		delay(1);
   14274 	else
   14275 		delay(50);
   14276 }
   14277 
   14278 static void
   14279 wm_put_eecd(struct wm_softc *sc)
   14280 {
   14281 	uint32_t reg;
   14282 
   14283 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14284 		device_xname(sc->sc_dev), __func__));
   14285 
   14286 	/* Stop nvm */
   14287 	reg = CSR_READ(sc, WMREG_EECD);
   14288 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14289 		/* Pull CS high */
   14290 		reg |= EECD_CS;
   14291 		wm_nvm_eec_clock_lower(sc, &reg);
   14292 	} else {
   14293 		/* CS on Microwire is active-high */
   14294 		reg &= ~(EECD_CS | EECD_DI);
   14295 		CSR_WRITE(sc, WMREG_EECD, reg);
   14296 		wm_nvm_eec_clock_raise(sc, &reg);
   14297 		wm_nvm_eec_clock_lower(sc, &reg);
   14298 	}
   14299 
   14300 	reg = CSR_READ(sc, WMREG_EECD);
   14301 	reg &= ~EECD_EE_REQ;
   14302 	CSR_WRITE(sc, WMREG_EECD, reg);
   14303 
   14304 	return;
   14305 }
   14306 
   14307 /*
   14308  * Get hardware semaphore.
   14309  * Same as e1000_get_hw_semaphore_generic()
   14310  */
   14311 static int
   14312 wm_get_swsm_semaphore(struct wm_softc *sc)
   14313 {
   14314 	int32_t timeout;
   14315 	uint32_t swsm;
   14316 
   14317 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14318 		device_xname(sc->sc_dev), __func__));
   14319 	KASSERT(sc->sc_nvm_wordsize > 0);
   14320 
   14321 retry:
   14322 	/* Get the SW semaphore. */
   14323 	timeout = sc->sc_nvm_wordsize + 1;
   14324 	while (timeout) {
   14325 		swsm = CSR_READ(sc, WMREG_SWSM);
   14326 
   14327 		if ((swsm & SWSM_SMBI) == 0)
   14328 			break;
   14329 
   14330 		delay(50);
   14331 		timeout--;
   14332 	}
   14333 
   14334 	if (timeout == 0) {
   14335 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14336 			/*
   14337 			 * In rare circumstances, the SW semaphore may already
   14338 			 * be held unintentionally. Clear the semaphore once
   14339 			 * before giving up.
   14340 			 */
   14341 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14342 			wm_put_swsm_semaphore(sc);
   14343 			goto retry;
   14344 		}
   14345 		aprint_error_dev(sc->sc_dev,
   14346 		    "could not acquire SWSM SMBI\n");
   14347 		return 1;
   14348 	}
   14349 
   14350 	/* Get the FW semaphore. */
   14351 	timeout = sc->sc_nvm_wordsize + 1;
   14352 	while (timeout) {
   14353 		swsm = CSR_READ(sc, WMREG_SWSM);
   14354 		swsm |= SWSM_SWESMBI;
   14355 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14356 		/* If we managed to set the bit we got the semaphore. */
   14357 		swsm = CSR_READ(sc, WMREG_SWSM);
   14358 		if (swsm & SWSM_SWESMBI)
   14359 			break;
   14360 
   14361 		delay(50);
   14362 		timeout--;
   14363 	}
   14364 
   14365 	if (timeout == 0) {
   14366 		aprint_error_dev(sc->sc_dev,
   14367 		    "could not acquire SWSM SWESMBI\n");
   14368 		/* Release semaphores */
   14369 		wm_put_swsm_semaphore(sc);
   14370 		return 1;
   14371 	}
   14372 	return 0;
   14373 }
   14374 
   14375 /*
   14376  * Put hardware semaphore.
   14377  * Same as e1000_put_hw_semaphore_generic()
   14378  */
   14379 static void
   14380 wm_put_swsm_semaphore(struct wm_softc *sc)
   14381 {
   14382 	uint32_t swsm;
   14383 
   14384 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14385 		device_xname(sc->sc_dev), __func__));
   14386 
   14387 	swsm = CSR_READ(sc, WMREG_SWSM);
   14388 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14389 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14390 }
   14391 
   14392 /*
   14393  * Get SW/FW semaphore.
   14394  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14395  */
   14396 static int
   14397 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14398 {
   14399 	uint32_t swfw_sync;
   14400 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14401 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14402 	int timeout;
   14403 
   14404 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14405 		device_xname(sc->sc_dev), __func__));
   14406 
   14407 	if (sc->sc_type == WM_T_80003)
   14408 		timeout = 50;
   14409 	else
   14410 		timeout = 200;
   14411 
   14412 	while (timeout) {
   14413 		if (wm_get_swsm_semaphore(sc)) {
   14414 			aprint_error_dev(sc->sc_dev,
   14415 			    "%s: failed to get semaphore\n",
   14416 			    __func__);
   14417 			return 1;
   14418 		}
   14419 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14420 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14421 			swfw_sync |= swmask;
   14422 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14423 			wm_put_swsm_semaphore(sc);
   14424 			return 0;
   14425 		}
   14426 		wm_put_swsm_semaphore(sc);
   14427 		delay(5000);
   14428 		timeout--;
   14429 	}
   14430 	device_printf(sc->sc_dev,
   14431 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14432 	    mask, swfw_sync);
   14433 	return 1;
   14434 }
   14435 
   14436 static void
   14437 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14438 {
   14439 	uint32_t swfw_sync;
   14440 
   14441 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14442 		device_xname(sc->sc_dev), __func__));
   14443 
   14444 	while (wm_get_swsm_semaphore(sc) != 0)
   14445 		continue;
   14446 
   14447 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14448 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14449 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14450 
   14451 	wm_put_swsm_semaphore(sc);
   14452 }
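          /*
           * Illustrative only (not compiled): every use of the SW/FW sync
           * register must pair get and put on the same resource mask, e.g.
           * SWFW_EEP_SM for the EEPROM, as wm_get_nvm_80003() below does.
           */
          #if 0
          	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
          		/* ... safely touch the shared EEPROM interface ... */
          		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
          	}
          #endif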
   14453 
   14454 static int
   14455 wm_get_nvm_80003(struct wm_softc *sc)
   14456 {
   14457 	int rv;
   14458 
   14459 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14460 		device_xname(sc->sc_dev), __func__));
   14461 
   14462 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14463 		aprint_error_dev(sc->sc_dev,
   14464 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14465 		return rv;
   14466 	}
   14467 
   14468 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14469 	    && (rv = wm_get_eecd(sc)) != 0) {
   14470 		aprint_error_dev(sc->sc_dev,
   14471 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14472 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14473 		return rv;
   14474 	}
   14475 
   14476 	return 0;
   14477 }
   14478 
   14479 static void
   14480 wm_put_nvm_80003(struct wm_softc *sc)
   14481 {
   14482 
   14483 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14484 		device_xname(sc->sc_dev), __func__));
   14485 
   14486 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14487 		wm_put_eecd(sc);
   14488 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14489 }
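
/*
 * Note the lock ordering: wm_get_nvm_80003() takes the SWFW EEPROM
 * semaphore before the optional EECD lock, and wm_put_nvm_80003()
 * releases them in the reverse order.
 */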
   14490 
   14491 static int
   14492 wm_get_nvm_82571(struct wm_softc *sc)
   14493 {
   14494 	int rv;
   14495 
   14496 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14497 		device_xname(sc->sc_dev), __func__));
   14498 
   14499 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14500 		return rv;
   14501 
   14502 	switch (sc->sc_type) {
   14503 	case WM_T_82573:
   14504 		break;
   14505 	default:
   14506 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14507 			rv = wm_get_eecd(sc);
   14508 		break;
   14509 	}
   14510 
   14511 	if (rv != 0) {
   14512 		aprint_error_dev(sc->sc_dev,
   14513 		    "%s: failed to get semaphore\n",
   14514 		    __func__);
   14515 		wm_put_swsm_semaphore(sc);
   14516 	}
   14517 
   14518 	return rv;
   14519 }
   14520 
   14521 static void
   14522 wm_put_nvm_82571(struct wm_softc *sc)
   14523 {
   14524 
   14525 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14526 		device_xname(sc->sc_dev), __func__));
   14527 
   14528 	switch (sc->sc_type) {
   14529 	case WM_T_82573:
   14530 		break;
   14531 	default:
   14532 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14533 			wm_put_eecd(sc);
   14534 		break;
   14535 	}
   14536 
   14537 	wm_put_swsm_semaphore(sc);
   14538 }
   14539 
   14540 static int
   14541 wm_get_phy_82575(struct wm_softc *sc)
   14542 {
   14543 
   14544 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14545 		device_xname(sc->sc_dev), __func__));
   14546 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14547 }
   14548 
   14549 static void
   14550 wm_put_phy_82575(struct wm_softc *sc)
   14551 {
   14552 
   14553 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14554 		device_xname(sc->sc_dev), __func__));
   14555 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14556 }
   14557 
   14558 static int
   14559 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14560 {
   14561 	uint32_t ext_ctrl;
	int timeout;
   14563 
   14564 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14565 		device_xname(sc->sc_dev), __func__));
   14566 
   14567 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14568 	for (timeout = 0; timeout < 200; timeout++) {
   14569 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14570 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14571 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14572 
   14573 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14574 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14575 			return 0;
   14576 		delay(5000);
   14577 	}
   14578 	device_printf(sc->sc_dev,
   14579 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14580 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14581 	return 1;
   14582 }
   14583 
   14584 static void
   14585 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14586 {
   14587 	uint32_t ext_ctrl;
   14588 
   14589 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14590 		device_xname(sc->sc_dev), __func__));
   14591 
   14592 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14593 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14594 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14595 
   14596 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14597 }
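
/*
 * Note: on success, wm_get_swfwhw_semaphore() returns with
 * sc_ich_phymtx still held; wm_put_swfwhw_semaphore() drops it.  The
 * worst case spin for the EXTCNFCTR ownership bit is 200 * 5ms = 1s.
 */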
   14598 
   14599 static int
   14600 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14601 {
   14602 	uint32_t ext_ctrl;
   14603 	int timeout;
   14604 
   14605 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14606 		device_xname(sc->sc_dev), __func__));
   14607 	mutex_enter(sc->sc_ich_phymtx);
   14608 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14609 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14610 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14611 			break;
   14612 		delay(1000);
   14613 	}
   14614 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14615 		device_printf(sc->sc_dev,
   14616 		    "SW has already locked the resource\n");
   14617 		goto out;
   14618 	}
   14619 
   14620 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14621 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14622 	for (timeout = 0; timeout < 1000; timeout++) {
   14623 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14624 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14625 			break;
   14626 		delay(1000);
   14627 	}
   14628 	if (timeout >= 1000) {
   14629 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14630 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14631 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14632 		goto out;
   14633 	}
   14634 	return 0;
   14635 
   14636 out:
   14637 	mutex_exit(sc->sc_ich_phymtx);
   14638 	return 1;
   14639 }
   14640 
   14641 static void
   14642 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14643 {
   14644 	uint32_t ext_ctrl;
   14645 
   14646 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14647 		device_xname(sc->sc_dev), __func__));
   14648 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14649 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14650 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14651 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14652 	} else {
   14653 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14654 	}
   14655 
   14656 	mutex_exit(sc->sc_ich_phymtx);
   14657 }
   14658 
   14659 static int
   14660 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14661 {
   14662 
   14663 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14664 		device_xname(sc->sc_dev), __func__));
   14665 	mutex_enter(sc->sc_ich_nvmmtx);
   14666 
   14667 	return 0;
   14668 }
   14669 
   14670 static void
   14671 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14672 {
   14673 
   14674 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14675 		device_xname(sc->sc_dev), __func__));
   14676 	mutex_exit(sc->sc_ich_nvmmtx);
   14677 }
   14678 
   14679 static int
   14680 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14681 {
   14682 	int i = 0;
   14683 	uint32_t reg;
   14684 
   14685 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14686 		device_xname(sc->sc_dev), __func__));
   14687 
   14688 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14689 	do {
   14690 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14691 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14692 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14693 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14694 			break;
   14695 		delay(2*1000);
   14696 		i++;
   14697 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14698 
   14699 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14700 		wm_put_hw_semaphore_82573(sc);
   14701 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14702 		    device_xname(sc->sc_dev));
   14703 		return -1;
   14704 	}
   14705 
   14706 	return 0;
   14707 }
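
/*
 * The 82573 acquire above polls EXTCNFCTR_MDIO_SW_OWNERSHIP every 2ms
 * for up to WM_MDIO_OWNERSHIP_TIMEOUT attempts and, on timeout, clears
 * the bit itself (via the put function below) before reporting the
 * failure.
 */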
   14708 
   14709 static void
   14710 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14711 {
   14712 	uint32_t reg;
   14713 
   14714 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14715 		device_xname(sc->sc_dev), __func__));
   14716 
   14717 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14718 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14719 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14720 }
   14721 
   14722 /*
   14723  * Management mode and power management related subroutines.
   14724  * BMC, AMT, suspend/resume and EEE.
   14725  */
   14726 
   14727 #ifdef WM_WOL
   14728 static int
   14729 wm_check_mng_mode(struct wm_softc *sc)
   14730 {
   14731 	int rv;
   14732 
   14733 	switch (sc->sc_type) {
   14734 	case WM_T_ICH8:
   14735 	case WM_T_ICH9:
   14736 	case WM_T_ICH10:
   14737 	case WM_T_PCH:
   14738 	case WM_T_PCH2:
   14739 	case WM_T_PCH_LPT:
   14740 	case WM_T_PCH_SPT:
   14741 	case WM_T_PCH_CNP:
   14742 		rv = wm_check_mng_mode_ich8lan(sc);
   14743 		break;
   14744 	case WM_T_82574:
   14745 	case WM_T_82583:
   14746 		rv = wm_check_mng_mode_82574(sc);
   14747 		break;
   14748 	case WM_T_82571:
   14749 	case WM_T_82572:
   14750 	case WM_T_82573:
   14751 	case WM_T_80003:
   14752 		rv = wm_check_mng_mode_generic(sc);
   14753 		break;
   14754 	default:
		/* Nothing to do */
   14756 		rv = 0;
   14757 		break;
   14758 	}
   14759 
   14760 	return rv;
   14761 }
   14762 
   14763 static int
   14764 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14765 {
   14766 	uint32_t fwsm;
   14767 
   14768 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14769 
   14770 	if (((fwsm & FWSM_FW_VALID) != 0)
   14771 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14772 		return 1;
   14773 
   14774 	return 0;
   14775 }
   14776 
   14777 static int
   14778 wm_check_mng_mode_82574(struct wm_softc *sc)
   14779 {
   14780 	uint16_t data;
   14781 
   14782 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14783 
   14784 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14785 		return 1;
   14786 
   14787 	return 0;
   14788 }
   14789 
   14790 static int
   14791 wm_check_mng_mode_generic(struct wm_softc *sc)
   14792 {
   14793 	uint32_t fwsm;
   14794 
   14795 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14796 
   14797 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14798 		return 1;
   14799 
   14800 	return 0;
   14801 }
   14802 #endif /* WM_WOL */
   14803 
   14804 static int
   14805 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14806 {
   14807 	uint32_t manc, fwsm, factps;
   14808 
   14809 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14810 		return 0;
   14811 
   14812 	manc = CSR_READ(sc, WMREG_MANC);
   14813 
   14814 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14815 		device_xname(sc->sc_dev), manc));
   14816 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14817 		return 0;
   14818 
   14819 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14820 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14821 		factps = CSR_READ(sc, WMREG_FACTPS);
   14822 		if (((factps & FACTPS_MNGCG) == 0)
   14823 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14824 			return 1;
   14825 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14826 		uint16_t data;
   14827 
   14828 		factps = CSR_READ(sc, WMREG_FACTPS);
   14829 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14830 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14831 			device_xname(sc->sc_dev), factps, data));
   14832 		if (((factps & FACTPS_MNGCG) == 0)
   14833 		    && ((data & NVM_CFG2_MNGM_MASK)
   14834 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14835 			return 1;
   14836 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14837 	    && ((manc & MANC_ASF_EN) == 0))
   14838 		return 1;
   14839 
   14840 	return 0;
   14841 }
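
/*
 * In short, wm_enable_mng_pass_thru() reports pass-through only when
 * ASF/management firmware is present and MANC_RECV_TCO_EN is set, and
 * then only if the management mode looks like pass-through: via FWSM
 * on parts with a valid ARC subsystem, via the NVM CFG2 word on the
 * 82574/82583, or via MANC (SMBus enabled, ASF disabled) otherwise.
 */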
   14842 
   14843 static bool
   14844 wm_phy_resetisblocked(struct wm_softc *sc)
   14845 {
   14846 	bool blocked = false;
   14847 	uint32_t reg;
   14848 	int i = 0;
   14849 
   14850 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14851 		device_xname(sc->sc_dev), __func__));
   14852 
   14853 	switch (sc->sc_type) {
   14854 	case WM_T_ICH8:
   14855 	case WM_T_ICH9:
   14856 	case WM_T_ICH10:
   14857 	case WM_T_PCH:
   14858 	case WM_T_PCH2:
   14859 	case WM_T_PCH_LPT:
   14860 	case WM_T_PCH_SPT:
   14861 	case WM_T_PCH_CNP:
   14862 		do {
   14863 			reg = CSR_READ(sc, WMREG_FWSM);
   14864 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14865 				blocked = true;
   14866 				delay(10*1000);
   14867 				continue;
   14868 			}
   14869 			blocked = false;
   14870 		} while (blocked && (i++ < 30));
   14871 		return blocked;
   14873 	case WM_T_82571:
   14874 	case WM_T_82572:
   14875 	case WM_T_82573:
   14876 	case WM_T_82574:
   14877 	case WM_T_82583:
   14878 	case WM_T_80003:
   14879 		reg = CSR_READ(sc, WMREG_MANC);
   14880 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14881 			return true;
   14882 		else
   14883 			return false;
   14885 	default:
   14886 		/* No problem */
   14887 		break;
   14888 	}
   14889 
   14890 	return false;
   14891 }
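
/*
 * On ICH/PCH parts, wm_phy_resetisblocked() polls FWSM_RSPCIPHY for up
 * to about 30 * 10ms = 300ms before concluding that firmware blocks
 * PHY resets; on the 8257x/80003 it is a single MANC register test.
 */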
   14892 
   14893 static void
   14894 wm_get_hw_control(struct wm_softc *sc)
   14895 {
   14896 	uint32_t reg;
   14897 
   14898 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14899 		device_xname(sc->sc_dev), __func__));
   14900 
   14901 	if (sc->sc_type == WM_T_82573) {
   14902 		reg = CSR_READ(sc, WMREG_SWSM);
   14903 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14904 	} else if (sc->sc_type >= WM_T_82571) {
   14905 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14906 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14907 	}
   14908 }
   14909 
   14910 static void
   14911 wm_release_hw_control(struct wm_softc *sc)
   14912 {
   14913 	uint32_t reg;
   14914 
   14915 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14916 		device_xname(sc->sc_dev), __func__));
   14917 
   14918 	if (sc->sc_type == WM_T_82573) {
   14919 		reg = CSR_READ(sc, WMREG_SWSM);
   14920 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14921 	} else if (sc->sc_type >= WM_T_82571) {
   14922 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14923 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14924 	}
   14925 }
   14926 
   14927 static void
   14928 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14929 {
   14930 	uint32_t reg;
   14931 
   14932 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14933 		device_xname(sc->sc_dev), __func__));
   14934 
   14935 	if (sc->sc_type < WM_T_PCH2)
   14936 		return;
   14937 
   14938 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14939 
   14940 	if (gate)
   14941 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14942 	else
   14943 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14944 
   14945 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14946 }
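
/*
 * Gating (EXTCNFCTR_GATE_PHY_CFG) keeps the hardware from starting its
 * own automatic PHY configuration while the driver is working on the
 * PHY; wm_init_phy_workarounds_pchlan() below sets the gate before
 * touching the PHY and ungates it again on exit when no valid firmware
 * is present.
 */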
   14947 
   14948 static int
   14949 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14950 {
   14951 	uint32_t fwsm, reg;
   14952 	int rv = 0;
   14953 
   14954 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14955 		device_xname(sc->sc_dev), __func__));
   14956 
   14957 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14958 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14959 
   14960 	/* Disable ULP */
   14961 	wm_ulp_disable(sc);
   14962 
   14963 	/* Acquire PHY semaphore */
   14964 	rv = sc->phy.acquire(sc);
   14965 	if (rv != 0) {
   14966 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   14967 		device_xname(sc->sc_dev), __func__));
   14968 		return -1;
   14969 	}
   14970 
   14971 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14972 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14973 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14974 	 */
   14975 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14976 	switch (sc->sc_type) {
   14977 	case WM_T_PCH_LPT:
   14978 	case WM_T_PCH_SPT:
   14979 	case WM_T_PCH_CNP:
   14980 		if (wm_phy_is_accessible_pchlan(sc))
   14981 			break;
   14982 
   14983 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14984 		 * forcing MAC to SMBus mode first.
   14985 		 */
   14986 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14987 		reg |= CTRL_EXT_FORCE_SMBUS;
   14988 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14989 #if 0
   14990 		/* XXX Isn't this required??? */
   14991 		CSR_WRITE_FLUSH(sc);
   14992 #endif
   14993 		/* Wait 50 milliseconds for MAC to finish any retries
   14994 		 * that it might be trying to perform from previous
   14995 		 * attempts to acknowledge any phy read requests.
   14996 		 */
   14997 		delay(50 * 1000);
   14998 		/* FALLTHROUGH */
   14999 	case WM_T_PCH2:
   15000 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15001 			break;
   15002 		/* FALLTHROUGH */
   15003 	case WM_T_PCH:
   15004 		if (sc->sc_type == WM_T_PCH)
   15005 			if ((fwsm & FWSM_FW_VALID) != 0)
   15006 				break;
   15007 
   15008 		if (wm_phy_resetisblocked(sc) == true) {
   15009 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15010 			break;
   15011 		}
   15012 
   15013 		/* Toggle LANPHYPC Value bit */
   15014 		wm_toggle_lanphypc_pch_lpt(sc);
   15015 
   15016 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15017 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15018 				break;
   15019 
			/* Toggling LANPHYPC brings the PHY out of SMBus mode,
			 * so ensure that the MAC is also out of SMBus mode.
			 */
   15023 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15024 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15025 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15026 
   15027 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15028 				break;
   15029 			rv = -1;
   15030 		}
   15031 		break;
   15032 	default:
   15033 		break;
   15034 	}
   15035 
   15036 	/* Release semaphore */
   15037 	sc->phy.release(sc);
   15038 
   15039 	if (rv == 0) {
   15040 		/* Check to see if able to reset PHY.  Print error if not */
   15041 		if (wm_phy_resetisblocked(sc)) {
   15042 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15043 			goto out;
   15044 		}
   15045 
   15046 		/* Reset the PHY before any access to it.  Doing so, ensures
   15047 		 * that the PHY is in a known good state before we read/write
   15048 		 * PHY registers.  The generic reset is sufficient here,
   15049 		 * because we haven't determined the PHY type yet.
   15050 		 */
   15051 		if (wm_reset_phy(sc) != 0)
   15052 			goto out;
   15053 
   15054 		/* On a successful reset, possibly need to wait for the PHY
   15055 		 * to quiesce to an accessible state before returning control
   15056 		 * to the calling function.  If the PHY does not quiesce, then
   15057 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
   15059 		 */
   15060 		if (wm_phy_resetisblocked(sc))
   15061 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15062 	}
   15063 
   15064 out:
   15065 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15066 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15067 		delay(10*1000);
   15068 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15069 	}
   15070 
   15071 	return 0;
   15072 }
   15073 
   15074 static void
   15075 wm_init_manageability(struct wm_softc *sc)
   15076 {
   15077 
   15078 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15079 		device_xname(sc->sc_dev), __func__));
   15080 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15081 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15082 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15083 
   15084 		/* Disable hardware interception of ARP */
   15085 		manc &= ~MANC_ARP_EN;
   15086 
   15087 		/* Enable receiving management packets to the host */
   15088 		if (sc->sc_type >= WM_T_82571) {
   15089 			manc |= MANC_EN_MNG2HOST;
   15090 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15091 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15092 		}
   15093 
   15094 		CSR_WRITE(sc, WMREG_MANC, manc);
   15095 	}
   15096 }
   15097 
   15098 static void
   15099 wm_release_manageability(struct wm_softc *sc)
   15100 {
   15101 
   15102 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15103 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15104 
   15105 		manc |= MANC_ARP_EN;
   15106 		if (sc->sc_type >= WM_T_82571)
   15107 			manc &= ~MANC_EN_MNG2HOST;
   15108 
   15109 		CSR_WRITE(sc, WMREG_MANC, manc);
   15110 	}
   15111 }
   15112 
   15113 static void
   15114 wm_get_wakeup(struct wm_softc *sc)
   15115 {
   15116 
   15117 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15118 	switch (sc->sc_type) {
   15119 	case WM_T_82573:
   15120 	case WM_T_82583:
   15121 		sc->sc_flags |= WM_F_HAS_AMT;
   15122 		/* FALLTHROUGH */
   15123 	case WM_T_80003:
   15124 	case WM_T_82575:
   15125 	case WM_T_82576:
   15126 	case WM_T_82580:
   15127 	case WM_T_I350:
   15128 	case WM_T_I354:
   15129 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15130 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15131 		/* FALLTHROUGH */
   15132 	case WM_T_82541:
   15133 	case WM_T_82541_2:
   15134 	case WM_T_82547:
   15135 	case WM_T_82547_2:
   15136 	case WM_T_82571:
   15137 	case WM_T_82572:
   15138 	case WM_T_82574:
   15139 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15140 		break;
   15141 	case WM_T_ICH8:
   15142 	case WM_T_ICH9:
   15143 	case WM_T_ICH10:
   15144 	case WM_T_PCH:
   15145 	case WM_T_PCH2:
   15146 	case WM_T_PCH_LPT:
   15147 	case WM_T_PCH_SPT:
   15148 	case WM_T_PCH_CNP:
   15149 		sc->sc_flags |= WM_F_HAS_AMT;
   15150 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15151 		break;
   15152 	default:
   15153 		break;
   15154 	}
   15155 
   15156 	/* 1: HAS_MANAGE */
   15157 	if (wm_enable_mng_pass_thru(sc) != 0)
   15158 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15159 
	/*
	 * Note that the WOL flag is set after the EEPROM stuff is reset.
	 */
   15164 }
   15165 
   15166 /*
   15167  * Unconfigure Ultra Low Power mode.
   15168  * Only for I217 and newer (see below).
   15169  */
   15170 static int
   15171 wm_ulp_disable(struct wm_softc *sc)
   15172 {
   15173 	uint32_t reg;
   15174 	uint16_t phyreg;
   15175 	int i = 0, rv = 0;
   15176 
   15177 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15178 		device_xname(sc->sc_dev), __func__));
   15179 	/* Exclude old devices */
   15180 	if ((sc->sc_type < WM_T_PCH_LPT)
   15181 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15182 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15183 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15184 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15185 		return 0;
   15186 
   15187 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15188 		/* Request ME un-configure ULP mode in the PHY */
   15189 		reg = CSR_READ(sc, WMREG_H2ME);
   15190 		reg &= ~H2ME_ULP;
   15191 		reg |= H2ME_ENFORCE_SETTINGS;
   15192 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15193 
   15194 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15195 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15196 			if (i++ == 30) {
   15197 				device_printf(sc->sc_dev, "%s timed out\n",
   15198 				    __func__);
   15199 				return -1;
   15200 			}
   15201 			delay(10 * 1000);
   15202 		}
   15203 		reg = CSR_READ(sc, WMREG_H2ME);
   15204 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15205 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15206 
   15207 		return 0;
   15208 	}
   15209 
   15210 	/* Acquire semaphore */
   15211 	rv = sc->phy.acquire(sc);
   15212 	if (rv != 0) {
   15213 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15214 		device_xname(sc->sc_dev), __func__));
   15215 		return -1;
   15216 	}
   15217 
   15218 	/* Toggle LANPHYPC */
   15219 	wm_toggle_lanphypc_pch_lpt(sc);
   15220 
   15221 	/* Unforce SMBus mode in PHY */
   15222 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15223 	if (rv != 0) {
   15224 		uint32_t reg2;
   15225 
   15226 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15227 			__func__);
   15228 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15229 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15230 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15231 		delay(50 * 1000);
   15232 
   15233 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15234 		    &phyreg);
   15235 		if (rv != 0)
   15236 			goto release;
   15237 	}
   15238 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15239 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15240 
   15241 	/* Unforce SMBus mode in MAC */
   15242 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15243 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15244 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15245 
   15246 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15247 	if (rv != 0)
   15248 		goto release;
   15249 	phyreg |= HV_PM_CTRL_K1_ENA;
   15250 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15251 
   15252 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15253 		&phyreg);
   15254 	if (rv != 0)
   15255 		goto release;
   15256 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15257 	    | I218_ULP_CONFIG1_STICKY_ULP
   15258 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15259 	    | I218_ULP_CONFIG1_WOL_HOST
   15260 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15261 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15262 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15263 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15264 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15265 	phyreg |= I218_ULP_CONFIG1_START;
   15266 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15267 
   15268 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15269 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15270 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15271 
   15272 release:
   15273 	/* Release semaphore */
   15274 	sc->phy.release(sc);
   15275 	wm_gmii_reset(sc);
   15276 	delay(50 * 1000);
   15277 
   15278 	return rv;
   15279 }
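
/*
 * wm_ulp_disable() above has two strategies: if valid ME firmware is
 * present, it asks the ME via H2ME to undo ULP and polls
 * FWSM_ULP_CFG_DONE for up to 300ms; otherwise the driver does it by
 * hand, toggling LANPHYPC, unforcing SMBus mode in both the PHY and
 * the MAC, and clearing the I218_ULP_CONFIG1 sticky bits before
 * resetting the PHY.
 */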
   15280 
   15281 /* WOL in the newer chipset interfaces (pchlan) */
   15282 static int
   15283 wm_enable_phy_wakeup(struct wm_softc *sc)
   15284 {
   15285 	device_t dev = sc->sc_dev;
   15286 	uint32_t mreg, moff;
   15287 	uint16_t wuce, wuc, wufc, preg;
   15288 	int i, rv;
   15289 
   15290 	KASSERT(sc->sc_type >= WM_T_PCH);
   15291 
   15292 	/* Copy MAC RARs to PHY RARs */
   15293 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15294 
   15295 	/* Activate PHY wakeup */
   15296 	rv = sc->phy.acquire(sc);
   15297 	if (rv != 0) {
   15298 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15299 		    __func__);
   15300 		return rv;
   15301 	}
   15302 
   15303 	/*
   15304 	 * Enable access to PHY wakeup registers.
   15305 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15306 	 */
   15307 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15308 	if (rv != 0) {
   15309 		device_printf(dev,
   15310 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15311 		goto release;
   15312 	}
   15313 
   15314 	/* Copy MAC MTA to PHY MTA */
   15315 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15316 		uint16_t lo, hi;
   15317 
   15318 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15319 		lo = (uint16_t)(mreg & 0xffff);
   15320 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15321 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15322 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15323 	}
   15324 
   15325 	/* Configure PHY Rx Control register */
   15326 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15327 	mreg = CSR_READ(sc, WMREG_RCTL);
   15328 	if (mreg & RCTL_UPE)
   15329 		preg |= BM_RCTL_UPE;
   15330 	if (mreg & RCTL_MPE)
   15331 		preg |= BM_RCTL_MPE;
   15332 	preg &= ~(BM_RCTL_MO_MASK);
   15333 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15334 	if (moff != 0)
   15335 		preg |= moff << BM_RCTL_MO_SHIFT;
   15336 	if (mreg & RCTL_BAM)
   15337 		preg |= BM_RCTL_BAM;
   15338 	if (mreg & RCTL_PMCF)
   15339 		preg |= BM_RCTL_PMCF;
   15340 	mreg = CSR_READ(sc, WMREG_CTRL);
   15341 	if (mreg & CTRL_RFCE)
   15342 		preg |= BM_RCTL_RFCE;
   15343 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15344 
   15345 	wuc = WUC_APME | WUC_PME_EN;
   15346 	wufc = WUFC_MAG;
   15347 	/* Enable PHY wakeup in MAC register */
   15348 	CSR_WRITE(sc, WMREG_WUC,
   15349 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15350 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15351 
   15352 	/* Configure and enable PHY wakeup in PHY registers */
   15353 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15354 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15355 
   15356 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15357 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15358 
   15359 release:
   15360 	sc->phy.release(sc);
   15361 
   15362 	return 0;
   15363 }
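
/*
 * The sequence above mirrors the MAC's wakeup state into the PHY so
 * that the PHY alone can wake the system: the receive addresses and
 * the multicast table are copied to the BM_RAR and BM_MTA wakeup
 * registers, the relevant RCTL bits are mirrored into BM_RCTL, and
 * WUC/WUFC are programmed in both the MAC and the PHY (with
 * WUC_PHY_WAKE selecting PHY-based wakeup).
 */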
   15364 
   15365 /* Power down workaround on D3 */
   15366 static void
   15367 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15368 {
   15369 	uint32_t reg;
   15370 	uint16_t phyreg;
   15371 	int i;
   15372 
   15373 	for (i = 0; i < 2; i++) {
   15374 		/* Disable link */
   15375 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15376 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15377 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15378 
   15379 		/*
   15380 		 * Call gig speed drop workaround on Gig disable before
   15381 		 * accessing any PHY registers
   15382 		 */
   15383 		if (sc->sc_type == WM_T_ICH8)
   15384 			wm_gig_downshift_workaround_ich8lan(sc);
   15385 
   15386 		/* Write VR power-down enable */
   15387 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15388 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15389 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15390 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15391 
   15392 		/* Read it back and test */
   15393 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15394 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15395 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15396 			break;
   15397 
   15398 		/* Issue PHY reset and repeat at most one more time */
   15399 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15400 	}
   15401 }
   15402 
   15403 /*
   15404  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15405  *  @sc: pointer to the HW structure
   15406  *
   15407  *  During S0 to Sx transition, it is possible the link remains at gig
   15408  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15409  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15410  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15411  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15412  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
   15414  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15415  *  than 10Mbps w/o EEE.
   15416  */
   15417 static void
   15418 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15419 {
   15420 	device_t dev = sc->sc_dev;
   15421 	struct ethercom *ec = &sc->sc_ethercom;
   15422 	uint32_t phy_ctrl;
   15423 	int rv;
   15424 
   15425 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15426 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15427 
   15428 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15429 
   15430 	if (sc->sc_phytype == WMPHY_I217) {
   15431 		uint16_t devid = sc->sc_pcidevid;
   15432 
   15433 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15434 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15435 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15436 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15437 		    (sc->sc_type >= WM_T_PCH_SPT))
   15438 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15439 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15440 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15441 
   15442 		if (sc->phy.acquire(sc) != 0)
   15443 			goto out;
   15444 
   15445 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15446 			uint16_t eee_advert;
   15447 
   15448 			rv = wm_read_emi_reg_locked(dev,
   15449 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15450 			if (rv)
   15451 				goto release;
   15452 
   15453 			/*
   15454 			 * Disable LPLU if both link partners support 100BaseT
   15455 			 * EEE and 100Full is advertised on both ends of the
   15456 			 * link, and enable Auto Enable LPI since there will
   15457 			 * be no driver to enable LPI while in Sx.
   15458 			 */
   15459 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15460 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15461 				uint16_t anar, phy_reg;
   15462 
   15463 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15464 				    &anar);
   15465 				if (anar & ANAR_TX_FD) {
   15466 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15467 					    PHY_CTRL_NOND0A_LPLU);
   15468 
   15469 					/* Set Auto Enable LPI after link up */
   15470 					sc->phy.readreg_locked(dev, 2,
   15471 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15472 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15473 					sc->phy.writereg_locked(dev, 2,
   15474 					    I217_LPI_GPIO_CTRL, phy_reg);
   15475 				}
   15476 			}
   15477 		}
   15478 
   15479 		/*
   15480 		 * For i217 Intel Rapid Start Technology support,
   15481 		 * when the system is going into Sx and no manageability engine
   15482 		 * is present, the driver must configure proxy to reset only on
   15483 		 * power good.	LPI (Low Power Idle) state must also reset only
   15484 		 * on power good, as well as the MTA (Multicast table array).
   15485 		 * The SMBus release must also be disabled on LCD reset.
   15486 		 */
   15487 
   15488 		/*
   15489 		 * Enable MTA to reset for Intel Rapid Start Technology
   15490 		 * Support
   15491 		 */
   15492 
   15493 release:
   15494 		sc->phy.release(sc);
   15495 	}
   15496 out:
   15497 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15498 
   15499 	if (sc->sc_type == WM_T_ICH8)
   15500 		wm_gig_downshift_workaround_ich8lan(sc);
   15501 
   15502 	if (sc->sc_type >= WM_T_PCH) {
   15503 		wm_oem_bits_config_ich8lan(sc, false);
   15504 
   15505 		/* Reset PHY to activate OEM bits on 82577/8 */
   15506 		if (sc->sc_type == WM_T_PCH)
   15507 			wm_reset_phy(sc);
   15508 
   15509 		if (sc->phy.acquire(sc) != 0)
   15510 			return;
   15511 		wm_write_smbus_addr(sc);
   15512 		sc->phy.release(sc);
   15513 	}
   15514 }
   15515 
   15516 /*
   15517  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15518  *  @sc: pointer to the HW structure
   15519  *
   15520  *  During Sx to S0 transitions on non-managed devices or managed devices
   15521  *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   15523  *  the PHY.
   15524  *  On i217, setup Intel Rapid Start Technology.
   15525  */
   15526 static int
   15527 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15528 {
   15529 	device_t dev = sc->sc_dev;
   15530 	int rv;
   15531 
   15532 	if (sc->sc_type < WM_T_PCH2)
   15533 		return 0;
   15534 
   15535 	rv = wm_init_phy_workarounds_pchlan(sc);
   15536 	if (rv != 0)
   15537 		return -1;
   15538 
	/* For i217 Intel Rapid Start Technology support: when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
   15544 	if (sc->sc_phytype == WMPHY_I217) {
   15545 		uint16_t phy_reg;
   15546 
   15547 		if (sc->phy.acquire(sc) != 0)
   15548 			return -1;
   15549 
   15550 		/* Clear Auto Enable LPI after link up */
   15551 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15552 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15553 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15554 
   15555 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15556 			/* Restore clear on SMB if no manageability engine
   15557 			 * is present
   15558 			 */
   15559 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15560 			    &phy_reg);
   15561 			if (rv != 0)
   15562 				goto release;
   15563 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15564 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15565 
   15566 			/* Disable Proxy */
   15567 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15568 		}
   15569 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15571 		if (rv != 0)
   15572 			goto release;
   15573 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15574 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15575 
   15576 release:
   15577 		sc->phy.release(sc);
   15578 		return rv;
   15579 	}
   15580 
   15581 	return 0;
   15582 }
   15583 
   15584 static void
   15585 wm_enable_wakeup(struct wm_softc *sc)
   15586 {
   15587 	uint32_t reg, pmreg;
   15588 	pcireg_t pmode;
   15589 	int rv = 0;
   15590 
   15591 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15592 		device_xname(sc->sc_dev), __func__));
   15593 
   15594 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15595 	    &pmreg, NULL) == 0)
   15596 		return;
   15597 
   15598 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15599 		goto pme;
   15600 
   15601 	/* Advertise the wakeup capability */
   15602 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15603 	    | CTRL_SWDPIN(3));
   15604 
   15605 	/* Keep the laser running on fiber adapters */
   15606 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15607 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15608 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15609 		reg |= CTRL_EXT_SWDPIN(3);
   15610 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15611 	}
   15612 
   15613 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15614 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15615 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15616 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15617 		wm_suspend_workarounds_ich8lan(sc);
   15618 
   15619 #if 0	/* For the multicast packet */
   15620 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15621 	reg |= WUFC_MC;
   15622 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15623 #endif
   15624 
   15625 	if (sc->sc_type >= WM_T_PCH) {
   15626 		rv = wm_enable_phy_wakeup(sc);
   15627 		if (rv != 0)
   15628 			goto pme;
   15629 	} else {
   15630 		/* Enable wakeup by the MAC */
   15631 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15632 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15633 	}
   15634 
   15635 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15636 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15637 		|| (sc->sc_type == WM_T_PCH2))
   15638 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15639 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15640 
   15641 pme:
   15642 	/* Request PME */
   15643 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15644 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15645 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15646 		/* For WOL */
   15647 		pmode |= PCI_PMCSR_PME_EN;
   15648 	} else {
   15649 		/* Disable WOL */
   15650 		pmode &= ~PCI_PMCSR_PME_EN;
   15651 	}
   15652 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15653 }
   15654 
   15655 /* Disable ASPM L0s and/or L1 for workaround */
   15656 static void
   15657 wm_disable_aspm(struct wm_softc *sc)
   15658 {
   15659 	pcireg_t reg, mask = 0;
	const char *str = "";
   15661 
	/*
	 * Only for PCIe devices which have the PCIe capability in the PCI
	 * config space.
	 */
   15666 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15667 		return;
   15668 
   15669 	switch (sc->sc_type) {
   15670 	case WM_T_82571:
   15671 	case WM_T_82572:
   15672 		/*
   15673 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15674 		 * State Power management L1 State (ASPM L1).
   15675 		 */
   15676 		mask = PCIE_LCSR_ASPM_L1;
   15677 		str = "L1 is";
   15678 		break;
   15679 	case WM_T_82573:
   15680 	case WM_T_82574:
   15681 	case WM_T_82583:
   15682 		/*
   15683 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15684 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The 82574 and 82583 documents say that
		 * disabling L0s on those specific chipsets is sufficient,
		 * but we follow what the Intel em driver does.
   15689 		 *
   15690 		 * References:
   15691 		 * Errata 8 of the Specification Update of i82573.
   15692 		 * Errata 20 of the Specification Update of i82574.
   15693 		 * Errata 9 of the Specification Update of i82583.
   15694 		 */
   15695 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15696 		str = "L0s and L1 are";
   15697 		break;
   15698 	default:
   15699 		return;
   15700 	}
   15701 
   15702 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15703 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15704 	reg &= ~mask;
   15705 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15706 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15707 
   15708 	/* Print only in wm_attach() */
   15709 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15710 		aprint_verbose_dev(sc->sc_dev,
   15711 		    "ASPM %s disabled to workaround the errata.\n", str);
   15712 }
   15713 
   15714 /* LPLU */
   15715 
   15716 static void
   15717 wm_lplu_d0_disable(struct wm_softc *sc)
   15718 {
   15719 	struct mii_data *mii = &sc->sc_mii;
   15720 	uint32_t reg;
   15721 	uint16_t phyval;
   15722 
   15723 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15724 		device_xname(sc->sc_dev), __func__));
   15725 
   15726 	if (sc->sc_phytype == WMPHY_IFE)
   15727 		return;
   15728 
   15729 	switch (sc->sc_type) {
   15730 	case WM_T_82571:
   15731 	case WM_T_82572:
   15732 	case WM_T_82573:
   15733 	case WM_T_82575:
   15734 	case WM_T_82576:
   15735 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15736 		phyval &= ~PMR_D0_LPLU;
   15737 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15738 		break;
   15739 	case WM_T_82580:
   15740 	case WM_T_I350:
   15741 	case WM_T_I210:
   15742 	case WM_T_I211:
   15743 		reg = CSR_READ(sc, WMREG_PHPM);
   15744 		reg &= ~PHPM_D0A_LPLU;
   15745 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15746 		break;
   15747 	case WM_T_82574:
   15748 	case WM_T_82583:
   15749 	case WM_T_ICH8:
   15750 	case WM_T_ICH9:
   15751 	case WM_T_ICH10:
   15752 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15753 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15754 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15755 		CSR_WRITE_FLUSH(sc);
   15756 		break;
   15757 	case WM_T_PCH:
   15758 	case WM_T_PCH2:
   15759 	case WM_T_PCH_LPT:
   15760 	case WM_T_PCH_SPT:
   15761 	case WM_T_PCH_CNP:
   15762 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15763 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15764 		if (wm_phy_resetisblocked(sc) == false)
   15765 			phyval |= HV_OEM_BITS_ANEGNOW;
   15766 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15767 		break;
   15768 	default:
   15769 		break;
   15770 	}
   15771 }
   15772 
   15773 /* EEE */
   15774 
   15775 static int
   15776 wm_set_eee_i350(struct wm_softc *sc)
   15777 {
   15778 	struct ethercom *ec = &sc->sc_ethercom;
   15779 	uint32_t ipcnfg, eeer;
   15780 	uint32_t ipcnfg_mask
   15781 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15782 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15783 
   15784 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15785 
   15786 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15787 	eeer = CSR_READ(sc, WMREG_EEER);
   15788 
   15789 	/* Enable or disable per user setting */
   15790 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15791 		ipcnfg |= ipcnfg_mask;
   15792 		eeer |= eeer_mask;
   15793 	} else {
   15794 		ipcnfg &= ~ipcnfg_mask;
   15795 		eeer &= ~eeer_mask;
   15796 	}
   15797 
   15798 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15799 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15800 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15801 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15802 
   15803 	return 0;
   15804 }
   15805 
   15806 static int
   15807 wm_set_eee_pchlan(struct wm_softc *sc)
   15808 {
   15809 	device_t dev = sc->sc_dev;
   15810 	struct ethercom *ec = &sc->sc_ethercom;
   15811 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15812 	int rv = 0;
   15813 
   15814 	switch (sc->sc_phytype) {
   15815 	case WMPHY_82579:
   15816 		lpa = I82579_EEE_LP_ABILITY;
   15817 		pcs_status = I82579_EEE_PCS_STATUS;
   15818 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15819 		break;
   15820 	case WMPHY_I217:
   15821 		lpa = I217_EEE_LP_ABILITY;
   15822 		pcs_status = I217_EEE_PCS_STATUS;
   15823 		adv_addr = I217_EEE_ADVERTISEMENT;
   15824 		break;
   15825 	default:
   15826 		return 0;
   15827 	}
   15828 
   15829 	if (sc->phy.acquire(sc)) {
   15830 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15831 		return 0;
   15832 	}
   15833 
   15834 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15835 	if (rv != 0)
   15836 		goto release;
   15837 
   15838 	/* Clear bits that enable EEE in various speeds */
   15839 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15840 
   15841 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15842 		/* Save off link partner's EEE ability */
   15843 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15844 		if (rv != 0)
   15845 			goto release;
   15846 
   15847 		/* Read EEE advertisement */
   15848 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15849 			goto release;
   15850 
   15851 		/*
   15852 		 * Enable EEE only for speeds in which the link partner is
   15853 		 * EEE capable and for which we advertise EEE.
   15854 		 */
   15855 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15856 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15857 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15858 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15859 			if ((data & ANLPAR_TX_FD) != 0)
   15860 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15861 			else {
   15862 				/*
   15863 				 * EEE is not supported in 100Half, so ignore
   15864 				 * partner's EEE in 100 ability if full-duplex
   15865 				 * is not advertised.
   15866 				 */
   15867 				sc->eee_lp_ability
   15868 				    &= ~AN_EEEADVERT_100_TX;
   15869 			}
   15870 		}
   15871 	}
   15872 
   15873 	if (sc->sc_phytype == WMPHY_82579) {
   15874 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15875 		if (rv != 0)
   15876 			goto release;
   15877 
   15878 		data &= ~I82579_LPI_PLL_SHUT_100;
   15879 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15880 	}
   15881 
   15882 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15883 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15884 		goto release;
   15885 
   15886 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15887 release:
   15888 	sc->phy.release(sc);
   15889 
   15890 	return rv;
   15891 }
   15892 
   15893 static int
   15894 wm_set_eee(struct wm_softc *sc)
   15895 {
   15896 	struct ethercom *ec = &sc->sc_ethercom;
   15897 
   15898 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15899 		return 0;
   15900 
   15901 	if (sc->sc_type == WM_T_I354) {
   15902 		/* I354 uses an external PHY */
   15903 		return 0; /* not yet */
   15904 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15905 		return wm_set_eee_i350(sc);
   15906 	else if (sc->sc_type >= WM_T_PCH2)
   15907 		return wm_set_eee_pchlan(sc);
   15908 
   15909 	return 0;
   15910 }
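
/*
 * Dispatch note: the I350 family programs EEE through the MAC-level
 * IPCNFG/EEER registers, while PCH2 and newer program it through PHY
 * (EMI) registers; the I354's external PHY is not handled yet.
 */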
   15911 
   15912 /*
   15913  * Workarounds (mainly PHY related).
   15914  * Basically, PHY's workarounds are in the PHY drivers.
   15915  */
   15916 
   15917 /* Work-around for 82566 Kumeran PCS lock loss */
   15918 static int
   15919 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15920 {
   15921 	struct mii_data *mii = &sc->sc_mii;
   15922 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15923 	int i, reg, rv;
   15924 	uint16_t phyreg;
   15925 
   15926 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15927 		device_xname(sc->sc_dev), __func__));
   15928 
   15929 	/* If the link is not up, do nothing */
   15930 	if ((status & STATUS_LU) == 0)
   15931 		return 0;
   15932 
   15933 	/* Nothing to do if the link is other than 1Gbps */
   15934 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15935 		return 0;
   15936 
   15937 	for (i = 0; i < 10; i++) {
   15938 		/* read twice */
   15939 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15940 		if (rv != 0)
   15941 			return rv;
   15942 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15943 		if (rv != 0)
   15944 			return rv;
   15945 
   15946 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15947 			goto out;	/* GOOD! */
   15948 
   15949 		/* Reset the PHY */
   15950 		wm_reset_phy(sc);
   15951 		delay(5*1000);
   15952 	}
   15953 
   15954 	/* Disable GigE link negotiation */
   15955 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15956 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15957 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15958 
   15959 	/*
   15960 	 * Call gig speed drop workaround on Gig disable before accessing
   15961 	 * any PHY registers.
   15962 	 */
   15963 	wm_gig_downshift_workaround_ich8lan(sc);
   15964 
   15965 out:
   15966 	return 0;
   15967 }
   15968 
   15969 /*
   15970  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15971  *  @sc: pointer to the HW structure
   15972  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15974  *  LPLU, Gig disable, MDIC PHY reset):
   15975  *    1) Set Kumeran Near-end loopback
   15976  *    2) Clear Kumeran Near-end loopback
   15977  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15978  */
   15979 static void
   15980 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15981 {
   15982 	uint16_t kmreg;
   15983 
   15984 	/* Only for igp3 */
   15985 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15986 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15987 			return;
   15988 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15989 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15990 			return;
   15991 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15992 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15993 	}
   15994 }
   15995 
   15996 /*
   15997  * Workaround for pch's PHYs
   15998  * XXX should be moved to new PHY driver?
   15999  */
   16000 static int
   16001 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16002 {
   16003 	device_t dev = sc->sc_dev;
   16004 	struct mii_data *mii = &sc->sc_mii;
   16005 	struct mii_softc *child;
   16006 	uint16_t phy_data, phyrev = 0;
   16007 	int phytype = sc->sc_phytype;
   16008 	int rv;
   16009 
   16010 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16011 		device_xname(dev), __func__));
   16012 	KASSERT(sc->sc_type == WM_T_PCH);
   16013 
   16014 	/* Set MDIO slow mode before any other MDIO access */
   16015 	if (phytype == WMPHY_82577)
   16016 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16017 			return rv;
   16018 
   16019 	child = LIST_FIRST(&mii->mii_phys);
   16020 	if (child != NULL)
   16021 		phyrev = child->mii_mpd_rev;
   16022 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16024 	if ((child != NULL) &&
   16025 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16026 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16027 		/* Disable generation of early preamble (0x4431) */
   16028 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16029 		    &phy_data);
   16030 		if (rv != 0)
   16031 			return rv;
   16032 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16033 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16034 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16035 		    phy_data);
   16036 		if (rv != 0)
   16037 			return rv;
   16038 
   16039 		/* Preamble tuning for SSC */
   16040 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16041 		if (rv != 0)
   16042 			return rv;
   16043 	}
   16044 
   16045 	/* 82578 */
   16046 	if (phytype == WMPHY_82578) {
   16047 		/*
   16048 		 * Return registers to default by doing a soft reset then
   16049 		 * writing 0x3140 to the control register
   16050 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16051 		 */
   16052 		if ((child != NULL) && (phyrev < 2)) {
   16053 			PHY_RESET(child);
   16054 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16055 			if (rv != 0)
   16056 				return rv;
   16057 		}
   16058 	}
   16059 
   16060 	/* Select page 0 */
   16061 	if ((rv = sc->phy.acquire(sc)) != 0)
   16062 		return rv;
   16063 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16064 	sc->phy.release(sc);
   16065 	if (rv != 0)
   16066 		return rv;
   16067 
   16068 	/*
   16069 	 * Configure the K1 Si workaround during phy reset assuming there is
   16070 	 * link so that it disables K1 if link is in 1Gbps.
   16071 	 */
   16072 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16073 		return rv;
   16074 
   16075 	/* Workaround for link disconnects on a busy hub in half duplex */
   16076 	rv = sc->phy.acquire(sc);
   16077 	if (rv)
   16078 		return rv;
   16079 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16080 	if (rv)
   16081 		goto release;
   16082 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16083 	    phy_data & 0x00ff);
   16084 	if (rv)
   16085 		goto release;
   16086 
   16087 	/* Set MSE higher to enable link to stay up when noise is high */
   16088 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16089 release:
   16090 	sc->phy.release(sc);
   16091 
   16092 	return rv;
   16093 }
   16094 
   16095 /*
   16096  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16097  *  @sc:   pointer to the HW structure
   16098  */
   16099 static void
   16100 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16101 {
   16102 
   16103 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16104 		device_xname(sc->sc_dev), __func__));
   16105 
   16106 	if (sc->phy.acquire(sc) != 0)
   16107 		return;
   16108 
   16109 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16110 
   16111 	sc->phy.release(sc);
   16112 }
   16113 
   16114 static void
   16115 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16116 {
   16117 	device_t dev = sc->sc_dev;
   16118 	uint32_t mac_reg;
   16119 	uint16_t i, wuce;
   16120 	int count;
   16121 
   16122 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16123 		device_xname(dev), __func__));
   16124 
   16125 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16126 		return;
   16127 
   16128 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16129 	count = wm_rar_count(sc);
   16130 	for (i = 0; i < count; i++) {
   16131 		uint16_t lo, hi;
   16132 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16133 		lo = (uint16_t)(mac_reg & 0xffff);
   16134 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16135 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16136 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16137 
   16138 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16139 		lo = (uint16_t)(mac_reg & 0xffff);
   16140 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16141 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16142 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16143 	}
   16144 
   16145 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16146 }
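
/*
 * The 32-bit RAL/RAH MAC registers are split into 16-bit halves above
 * because the BM wakeup registers behind the PHY are only 16 bits
 * wide; of RAH, only the address-valid bit (RAL_AV) is carried over,
 * via BM_RAR_CTRL.
 */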
   16147 
   16148 /*
   16149  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16150  *  with 82579 PHY
   16151  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16152  */
   16153 static int
   16154 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16155 {
   16156 	device_t dev = sc->sc_dev;
   16157 	int rar_count;
   16158 	int rv;
   16159 	uint32_t mac_reg;
   16160 	uint16_t dft_ctrl, data;
   16161 	uint16_t i;
   16162 
   16163 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16164 		device_xname(dev), __func__));
   16165 
   16166 	if (sc->sc_type < WM_T_PCH2)
   16167 		return 0;
   16168 
   16169 	/* Acquire PHY semaphore */
   16170 	rv = sc->phy.acquire(sc);
   16171 	if (rv != 0)
   16172 		return rv;
   16173 
   16174 	/* Disable Rx path while enabling/disabling workaround */
   16175 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16176 	if (rv != 0)
   16177 		goto out;
   16178 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16179 	    dft_ctrl | (1 << 14));
   16180 	if (rv != 0)
   16181 		goto out;
   16182 
   16183 	if (enable) {
   16184 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16185 		 * SHRAL/H) and initial CRC values to the MAC
   16186 		 */
   16187 		rar_count = wm_rar_count(sc);
   16188 		for (i = 0; i < rar_count; i++) {
   16189 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16190 			uint32_t addr_high, addr_low;
   16191 
   16192 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16193 			if (!(addr_high & RAL_AV))
   16194 				continue;
   16195 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16196 			mac_addr[0] = (addr_low & 0xFF);
   16197 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16198 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16199 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16200 			mac_addr[4] = (addr_high & 0xFF);
   16201 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16202 
   16203 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16204 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16205 		}
   16206 
   16207 		/* Write Rx addresses to the PHY */
   16208 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16209 	}
   16210 
   16211 	/*
   16212 	 * If enable ==
   16213 	 *	true: Enable jumbo frame workaround in the MAC.
   16214 	 *	false: Write MAC register values back to h/w defaults.
   16215 	 */
   16216 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16217 	if (enable) {
   16218 		mac_reg &= ~(1 << 14);
   16219 		mac_reg |= (7 << 15);
   16220 	} else
   16221 		mac_reg &= ~(0xf << 14);
   16222 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16223 
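         	/*
         	 * Enable CRC stripping in the MAC while the workaround is
         	 * active, and turn it back off when the workaround is
         	 * disabled.
         	 */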
   16224 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16225 	if (enable) {
   16226 		mac_reg |= RCTL_SECRC;
   16227 		sc->sc_rctl |= RCTL_SECRC;
   16228 		sc->sc_flags |= WM_F_CRC_STRIP;
   16229 	} else {
   16230 		mac_reg &= ~RCTL_SECRC;
   16231 		sc->sc_rctl &= ~RCTL_SECRC;
   16232 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16233 	}
   16234 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16235 
   16236 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16237 	if (rv != 0)
   16238 		goto out;
   16239 	if (enable)
   16240 		data |= 1 << 0;
   16241 	else
   16242 		data &= ~(1 << 0);
   16243 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16244 	if (rv != 0)
   16245 		goto out;
   16246 
   16247 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16248 	if (rv != 0)
   16249 		goto out;
   16250 	/*
   16251 	 * XXX FreeBSD and Linux do the same thing: they set the same value
   16252 	 * in both the enable case and the disable case. Is that correct?
   16253 	 */
   16254 	data &= ~(0xf << 8);
   16255 	data |= (0xb << 8);
   16256 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16257 	if (rv != 0)
   16258 		goto out;
   16259 
   16260 	/*
   16261 	 * If enable ==
   16262 	 *	true: Enable jumbo frame workaround in the PHY.
   16263 	 *	false: Write PHY register values back to h/w defaults.
   16264 	 */
   16265 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16266 	if (rv != 0)
   16267 		goto out;
   16268 	data &= ~(0x7F << 5);
   16269 	if (enable)
   16270 		data |= (0x37 << 5);
   16271 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16272 	if (rv != 0)
   16273 		goto out;
   16274 
   16275 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16276 	if (rv != 0)
   16277 		goto out;
   16278 	if (enable)
   16279 		data &= ~(1 << 13);
   16280 	else
   16281 		data |= (1 << 13);
   16282 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16283 	if (rv != 0)
   16284 		goto out;
   16285 
   16286 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16287 	if (rv != 0)
   16288 		goto out;
   16289 	data &= ~(0x3FF << 2);
   16290 	if (enable)
   16291 		data |= (I82579_TX_PTR_GAP << 2);
   16292 	else
   16293 		data |= (0x8 << 2);
   16294 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16295 	if (rv != 0)
   16296 		goto out;
   16297 
   16298 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16299 	    enable ? 0xf100 : 0x7e00);
   16300 	if (rv != 0)
   16301 		goto out;
   16302 
   16303 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16304 	if (rv != 0)
   16305 		goto out;
   16306 	if (enable)
   16307 		data |= 1 << 10;
   16308 	else
   16309 		data &= ~(1 << 10);
   16310 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16311 	if (rv != 0)
   16312 		goto out;
   16313 
   16314 	/* Re-enable Rx path after enabling/disabling workaround */
   16315 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16316 	    dft_ctrl & ~(1 << 14));
   16317 
   16318 out:
   16319 	sc->phy.release(sc);
   16320 
   16321 	return rv;
   16322 }
   16323 
   16324 /*
   16325  *  wm_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
   16326  *  done after every PHY reset.
   16327  */
   16328 static int
   16329 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16330 {
   16331 	device_t dev = sc->sc_dev;
   16332 	int rv;
   16333 
   16334 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16335 		device_xname(dev), __func__));
   16336 	KASSERT(sc->sc_type == WM_T_PCH2);
   16337 
   16338 	/* Set MDIO slow mode before any other MDIO access */
   16339 	rv = wm_set_mdio_slow_mode_hv(sc);
   16340 	if (rv != 0)
   16341 		return rv;
   16342 
   16343 	rv = sc->phy.acquire(sc);
   16344 	if (rv != 0)
   16345 		return rv;
   16346 	/* Raise the MSE threshold so the link stays up when noise is high */
   16347 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16348 	if (rv != 0)
   16349 		goto release;
   16350 	/* Drop the link after the MSE threshold has been reached 5 times */
   16351 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16352 release:
   16353 	sc->phy.release(sc);
   16354 
   16355 	return rv;
   16356 }
   16357 
   16358 /**
   16359  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16360  *  @link: link up bool flag
   16361  *
   16362  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   16363  *  preventing further DMA write requests.  Work around the issue by disabling
   16364  *  the de-assertion of the clock request when in 1Gbps mode.
   16365  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16366  *  speeds in order to avoid Tx hangs.
   16367  **/
   16368 static int
   16369 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16370 {
   16371 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16372 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16373 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16374 	uint16_t phyreg;
   16375 
   16376 	if (link && (speed == STATUS_SPEED_1000)) {
   16377 		int rv = sc->phy.acquire(sc);
         		if (rv != 0)
         			return rv;
   16378 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16379 		    &phyreg);
   16380 		if (rv != 0)
   16381 			goto release;
   16382 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16383 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16384 		if (rv != 0)
   16385 			goto release;
   16386 		delay(20);
   16387 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16388 
   16389 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16390 		    &phyreg);
   16391 release:
   16392 		sc->phy.release(sc);
   16393 		return rv;
   16394 	}
   16395 
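         	/* Link is down or not at 1Gbps: stop requesting the PLL clock */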
   16396 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16397 
   16398 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16399 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16400 	    || !link
   16401 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16402 		goto update_fextnvm6;
   16403 
   16404 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16405 
   16406 	/* Clear link status transmit timeout */
   16407 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16408 	if (speed == STATUS_SPEED_100) {
   16409 		/* Set inband Tx timeout to 5x10us for 100Half */
   16410 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16411 
   16412 		/* Do not extend the K1 entry latency for 100Half */
   16413 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16414 	} else {
   16415 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16416 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16417 
   16418 		/* Extend the K1 entry latency for 10 Mbps */
   16419 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16420 	}
   16421 
   16422 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16423 
   16424 update_fextnvm6:
   16425 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16426 	return 0;
   16427 }
   16428 
   16429 /*
   16430  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16431  *  @sc:   pointer to the HW structure
   16432  *  @link: link up bool flag
   16433  *
   16434  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   16435  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   16436  *  If link is down, the function will restore the default K1 setting located
   16437  *  in the NVM.
   16438  */
   16439 static int
   16440 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16441 {
   16442 	int k1_enable = sc->sc_nvm_k1_enabled;
   16443 
   16444 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16445 		device_xname(sc->sc_dev), __func__));
   16446 
   16447 	if (sc->phy.acquire(sc) != 0)
   16448 		return -1;
   16449 
   16450 	if (link) {
   16451 		k1_enable = 0;
   16452 
   16453 		/* Link stall fix for link up */
   16454 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16455 		    0x0100);
   16456 	} else {
   16457 		/* Link stall fix for link down */
   16458 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16459 		    0x4100);
   16460 	}
   16461 
   16462 	wm_configure_k1_ich8lan(sc, k1_enable);
   16463 	sc->phy.release(sc);
   16464 
   16465 	return 0;
   16466 }
   16467 
   16468 /*
   16469  *  wm_k1_workaround_lv - K1 Si workaround
   16470  *  @sc:   pointer to the HW structure
   16471  *
   16472  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps mode.
   16473  *  Disable K1 for 1000 and 100 speeds.
   16474  */
   16475 static int
   16476 wm_k1_workaround_lv(struct wm_softc *sc)
   16477 {
   16478 	uint32_t reg;
   16479 	uint16_t phyreg;
   16480 	int rv;
   16481 
   16482 	if (sc->sc_type != WM_T_PCH2)
   16483 		return 0;
   16484 
   16485 	/* Set K1 beacon duration based on 10Mbps speed */
   16486 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16487 	if (rv != 0)
   16488 		return rv;
   16489 
   16490 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16491 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16492 		if (phyreg &
   16493 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   16494 			/* LV 1G/100 packet drop issue workaround */
   16495 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16496 			    &phyreg);
   16497 			if (rv != 0)
   16498 				return rv;
   16499 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16500 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16501 			    phyreg);
   16502 			if (rv != 0)
   16503 				return rv;
   16504 		} else {
   16505 			/* For 10Mbps */
   16506 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16507 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16508 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16509 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16510 		}
   16511 	}
   16512 
   16513 	return 0;
   16514 }
   16515 
   16516 /*
   16517  *  wm_link_stall_workaround_hv - Si workaround
   16518  *  @sc: pointer to the HW structure
   16519  *
   16520  *  This function works around a Si bug where the link partner can get
   16521  *  a link up indication before the PHY does. If small packets are sent
   16522  *  by the link partner they can be placed in the packet buffer without
   16523  *  being properly accounted for by the PHY and will stall, preventing
   16524  *  further packets from being received.  The workaround is to clear the
   16525  *  packet buffer after the PHY detects link up.
   16526  */
   16527 static int
   16528 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16529 {
   16530 	uint16_t phyreg;
   16531 
   16532 	if (sc->sc_phytype != WMPHY_82578)
   16533 		return 0;
   16534 
   16535 	/* Do not apply the workaround if the PHY is in loopback (BMCR bit 14 set) */
   16536 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16537 	if ((phyreg & BMCR_LOOP) != 0)
   16538 		return 0;
   16539 
   16540 	/* Check if link is up and at 1Gbps */
   16541 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16542 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16543 	    | BM_CS_STATUS_SPEED_MASK;
   16544 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16545 		| BM_CS_STATUS_SPEED_1000))
   16546 		return 0;
   16547 
   16548 	delay(200 * 1000);	/* XXX too big */
   16549 
   16550 	/* Flush the packets in the fifo buffer */
   16551 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16552 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16553 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16554 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16555 
   16556 	return 0;
   16557 }
   16558 
   16559 static int
   16560 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16561 {
   16562 	int rv;
   16563 	uint16_t reg;
   16564 
   16565 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16566 	if (rv != 0)
   16567 		return rv;
   16568 
   16569 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16570 	    reg | HV_KMRN_MDIO_SLOW);
   16571 }
   16572 
   16573 /*
   16574  *  wm_configure_k1_ich8lan - Configure K1 power state
   16575  *  @sc: pointer to the HW structure
   16576  *  @enable: K1 state to configure
   16577  *
   16578  *  Configure the K1 power state based on the provided parameter.
   16579  *  Assumes semaphore already acquired.
   16580  */
   16581 static void
   16582 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16583 {
   16584 	uint32_t ctrl, ctrl_ext, tmp;
   16585 	uint16_t kmreg;
   16586 	int rv;
   16587 
   16588 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16589 
   16590 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16591 	if (rv != 0)
   16592 		return;
   16593 
   16594 	if (k1_enable)
   16595 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16596 	else
   16597 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16598 
   16599 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16600 	if (rv != 0)
   16601 		return;
   16602 
   16603 	delay(20);
   16604 
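         	/*
         	 * Briefly force the MAC speed with the speed-select bypass
         	 * set, then restore the original CTRL/CTRL_EXT values.
         	 * Presumably this toggle makes the MAC latch the new K1
         	 * setting; the reference drivers do not document it.
         	 */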
   16605 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16606 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16607 
   16608 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16609 	tmp |= CTRL_FRCSPD;
   16610 
   16611 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16612 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16613 	CSR_WRITE_FLUSH(sc);
   16614 	delay(20);
   16615 
   16616 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16617 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16618 	CSR_WRITE_FLUSH(sc);
   16619 	delay(20);
   16620 
   16621 	return;
   16622 }
   16623 
   16624 /* special case - for 82575 - need to do manual init ... */
   16625 static void
   16626 wm_reset_init_script_82575(struct wm_softc *sc)
   16627 {
   16628 	/*
   16629 	 * Remark: this is untested code - we have no board without EEPROM.
   16630 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   16631 	 */
   16632 
   16633 	/* SerDes configuration via SERDESCTRL */
   16634 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16635 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16636 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16637 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16638 
   16639 	/* CCM configuration via CCMCTL register */
   16640 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16641 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16642 
   16643 	/* PCIe lanes configuration */
   16644 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16645 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16646 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16647 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16648 
   16649 	/* PCIe PLL Configuration */
   16650 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16651 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16652 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16653 }
   16654 
   16655 static void
   16656 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16657 {
   16658 	uint32_t reg;
   16659 	uint16_t nvmword;
   16660 	int rv;
   16661 
   16662 	if (sc->sc_type != WM_T_82580)
   16663 		return;
   16664 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16665 		return;
   16666 
   16667 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16668 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16669 	if (rv != 0) {
   16670 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16671 		    __func__);
   16672 		return;
   16673 	}
   16674 
   16675 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16676 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16677 		reg |= MDICNFG_DEST;
   16678 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16679 		reg |= MDICNFG_COM_MDIO;
   16680 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16681 }
   16682 
   16683 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16684 
   16685 static bool
   16686 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16687 {
   16688 	uint32_t reg;
   16689 	uint16_t id1, id2;
   16690 	int i, rv;
   16691 
   16692 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16693 		device_xname(sc->sc_dev), __func__));
   16694 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16695 
   16696 	id1 = id2 = 0xffff;
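         	/*
         	 * Read the PHY ID registers, allowing one retry; the first
         	 * access after a reset may fail.
         	 */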
   16697 	for (i = 0; i < 2; i++) {
   16698 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16699 		    &id1);
   16700 		if ((rv != 0) || MII_INVALIDID(id1))
   16701 			continue;
   16702 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16703 		    &id2);
   16704 		if ((rv != 0) || MII_INVALIDID(id2))
   16705 			continue;
   16706 		break;
   16707 	}
   16708 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16709 		goto out;
   16710 
   16711 	/*
   16712 	 * In case the PHY needs to be in MDIO slow mode,
   16713 	 * set slow mode and try to get the PHY ID again.
   16714 	 */
   16715 	rv = 0;
   16716 	if (sc->sc_type < WM_T_PCH_LPT) {
   16717 		sc->phy.release(sc);
   16718 		wm_set_mdio_slow_mode_hv(sc);
   16719 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16720 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16721 		sc->phy.acquire(sc);
   16722 	}
   16723 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16724 		device_printf(sc->sc_dev, "XXX return with false\n");
   16725 		return false;
   16726 	}
   16727 out:
   16728 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16729 		/* Only unforce SMBus if ME is not active */
   16730 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16731 			uint16_t phyreg;
   16732 
   16733 			/* Unforce SMBus mode in PHY */
   16734 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16735 			    CV_SMB_CTRL, &phyreg);
   16736 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16737 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16738 			    CV_SMB_CTRL, phyreg);
   16739 
   16740 			/* Unforce SMBus mode in MAC */
   16741 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16742 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16743 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16744 		}
   16745 	}
   16746 	return true;
   16747 }
   16748 
   16749 static void
   16750 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16751 {
   16752 	uint32_t reg;
   16753 	int i;
   16754 
   16755 	/* Set PHY Config Counter to 50msec */
   16756 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16757 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16758 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16759 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16760 
   16761 	/* Toggle LANPHYPC */
   16762 	reg = CSR_READ(sc, WMREG_CTRL);
   16763 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16764 	reg &= ~CTRL_LANPHYPC_VALUE;
   16765 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16766 	CSR_WRITE_FLUSH(sc);
   16767 	delay(1000);
   16768 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16769 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16770 	CSR_WRITE_FLUSH(sc);
   16771 
   16772 	if (sc->sc_type < WM_T_PCH_LPT)
   16773 		delay(50 * 1000);
   16774 	else {
   16775 		i = 20;
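         		/*
         		 * Poll up to 20 times at 5ms intervals for the
         		 * LANPHYPC completion indication (CTRL_EXT_LPCD),
         		 * then allow 30ms for the PHY to settle.
         		 */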
   16776 
   16777 		do {
   16778 			delay(5 * 1000);
   16779 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16780 		    && i--);
   16781 
   16782 		delay(30 * 1000);
   16783 	}
   16784 }
   16785 
   16786 static int
   16787 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16788 {
   16789 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16790 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16791 	uint32_t rxa;
   16792 	uint16_t scale = 0, lat_enc = 0;
   16793 	int32_t obff_hwm = 0;
   16794 	int64_t lat_ns, value;
   16795 
   16796 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16797 		device_xname(sc->sc_dev), __func__));
   16798 
   16799 	if (link) {
   16800 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16801 		uint32_t status;
   16802 		uint16_t speed;
   16803 		pcireg_t preg;
   16804 
   16805 		status = CSR_READ(sc, WMREG_STATUS);
   16806 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16807 		case STATUS_SPEED_10:
   16808 			speed = 10;
   16809 			break;
   16810 		case STATUS_SPEED_100:
   16811 			speed = 100;
   16812 			break;
   16813 		case STATUS_SPEED_1000:
   16814 			speed = 1000;
   16815 			break;
   16816 		default:
   16817 			device_printf(sc->sc_dev, "Unknown speed "
   16818 			    "(status = %08x)\n", status);
   16819 			return -1;
   16820 		}
   16821 
   16822 		/* Rx Packet Buffer Allocation size (KB) */
   16823 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16824 
   16825 		/*
   16826 		 * Determine the maximum latency tolerated by the device.
   16827 		 *
   16828 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16829 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16830 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16831 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16832 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16833 		 */
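         		/*
         		 * Illustrative example (assumed figures): with
         		 * rxa = 26KB and a 1500 byte MTU at 1000Mbps,
         		 * lat_ns = (26 * 1024 - 2 * 1514) * 8 * 1000 / 1000
         		 * = 188768, which the loop below encodes as value 185
         		 * with scale 2 (i.e. 185 * 2^10 ns).
         		 */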
   16834 		lat_ns = ((int64_t)rxa * 1024 -
   16835 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16836 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16837 		if (lat_ns < 0)
   16838 			lat_ns = 0;
   16839 		else
   16840 			lat_ns /= speed;
   16841 		value = lat_ns;
   16842 
   16843 		while (value > LTRV_VALUE) {
   16844 			scale++;
   16845 			value = howmany(value, __BIT(5));
   16846 		}
   16847 		if (scale > LTRV_SCALE_MAX) {
   16848 			device_printf(sc->sc_dev,
   16849 			    "Invalid LTR latency scale %d\n", scale);
   16850 			return -1;
   16851 		}
   16852 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16853 
   16854 		/* Determine the maximum latency tolerated by the platform */
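         		/*
         		 * The LTR capability register holds the max snoop
         		 * latency in its low 16 bits and the max no-snoop
         		 * latency in its high 16 bits.
         		 */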
   16855 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16856 		    WM_PCI_LTR_CAP_LPT);
   16857 		max_snoop = preg & 0xffff;
   16858 		max_nosnoop = preg >> 16;
   16859 
   16860 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16861 
   16862 		if (lat_enc > max_ltr_enc) {
   16863 			lat_enc = max_ltr_enc;
   16864 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16865 			    * PCI_LTR_SCALETONS(
   16866 				    __SHIFTOUT(lat_enc,
   16867 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16868 		}
   16869 
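         		/*
         		 * Convert the tolerated latency back into the amount
         		 * of Rx buffer (in KB) consumed at line rate over that
         		 * time; the OBFF high water mark is what remains.
         		 */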
   16870 		if (lat_ns) {
   16871 			lat_ns *= speed * 1000;
   16872 			lat_ns /= 8;
   16873 			lat_ns /= 1000000000;
   16874 			obff_hwm = (int32_t)(rxa - lat_ns);
   16875 		}
   16876 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   16877 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16878 			    "(rxa = %d, lat_ns = %d)\n",
   16879 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16880 			return -1;
   16881 		}
   16882 	}
   16883 	/* Set the Snoop and No-Snoop latencies to the same value */
   16884 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16885 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16886 
   16887 	/* Set OBFF high water mark */
   16888 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16889 	reg |= obff_hwm;
   16890 	CSR_WRITE(sc, WMREG_SVT, reg);
   16891 
   16892 	/* Enable OBFF */
   16893 	reg = CSR_READ(sc, WMREG_SVCR);
   16894 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16895 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16896 
   16897 	return 0;
   16898 }
   16899 
   16900 /*
   16901  * I210 Errata 25 and I211 Errata 10
   16902  * Slow System Clock.
   16903  *
   16904  * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
   16905  */
   16906 static int
   16907 wm_pll_workaround_i210(struct wm_softc *sc)
   16908 {
   16909 	uint32_t mdicnfg, wuc;
   16910 	uint32_t reg;
   16911 	pcireg_t pcireg;
   16912 	uint32_t pmreg;
   16913 	uint16_t nvmword, tmp_nvmword;
   16914 	uint16_t phyval;
   16915 	bool wa_done = false;
   16916 	int i, rv = 0;
   16917 
   16918 	/* Get Power Management cap offset */
   16919 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16920 	    &pmreg, NULL) == 0)
   16921 		return -1;
   16922 
   16923 	/* Save WUC and MDICNFG registers */
   16924 	wuc = CSR_READ(sc, WMREG_WUC);
   16925 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16926 
   16927 	reg = mdicnfg & ~MDICNFG_DEST;
   16928 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16929 
   16930 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   16931 		/*
   16932 		 * The default value of the Initialization Control Word 1
   16933 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   16934 		 */
   16935 		nvmword = INVM_DEFAULT_AL;
   16936 	}
   16937 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16938 
   16939 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16940 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16941 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16942 
   16943 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16944 			rv = 0;
   16945 			break; /* OK */
   16946 		} else
   16947 			rv = -1;
   16948 
   16949 		wa_done = true;
   16950 		/* Directly reset the internal PHY */
   16951 		reg = CSR_READ(sc, WMREG_CTRL);
   16952 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16953 
   16954 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16955 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16956 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16957 
   16958 		CSR_WRITE(sc, WMREG_WUC, 0);
   16959 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16960 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16961 
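         		/*
         		 * Cycle the function through D3hot and back to D0 so
         		 * that the autoload word patched in above takes effect
         		 * and the PLL is reinitialized.
         		 */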
   16962 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16963 		    pmreg + PCI_PMCSR);
   16964 		pcireg |= PCI_PMCSR_STATE_D3;
   16965 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16966 		    pmreg + PCI_PMCSR, pcireg);
   16967 		delay(1000);
   16968 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16969 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16970 		    pmreg + PCI_PMCSR, pcireg);
   16971 
   16972 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16973 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16974 
   16975 		/* Restore WUC register */
   16976 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16977 	}
   16978 
   16979 	/* Restore MDICNFG setting */
   16980 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16981 	if (wa_done)
   16982 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16983 	return rv;
   16984 }
   16985 
   16986 static void
   16987 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16988 {
   16989 	uint32_t reg;
   16990 
   16991 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16992 		device_xname(sc->sc_dev), __func__));
   16993 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16994 	    || (sc->sc_type == WM_T_PCH_CNP));
   16995 
   16996 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16997 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16998 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16999 
   17000 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17001 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17002 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17003 }
   17004 
   17005 /* Sysctl function */
   17006 #ifdef WM_DEBUG
   17007 static int
   17008 wm_sysctl_debug(SYSCTLFN_ARGS)
   17009 {
   17010 	struct sysctlnode node = *rnode;
   17011 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17012 	uint32_t dflags;
   17013 	int error;
   17014 
   17015 	dflags = sc->sc_debug;
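         	/*
         	 * Hand sysctl_lookup() a copy of the flags; the new value is
         	 * committed back to the softc only if the lookup (and any
         	 * write) succeeded.
         	 */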
   17016 	node.sysctl_data = &dflags;
   17017 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17018 
   17019 	if (error || newp == NULL)
   17020 		return error;
   17021 
   17022 	sc->sc_debug = dflags;
   17023 
   17024 	return 0;
   17025 }
   17026 #endif
   17027