      1 /*	$NetBSD: if_wm.c,v 1.707 2021/10/13 08:09:46 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.707 2021/10/13 08:09:46 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 #include <sys/sysctl.h>
    109 #include <sys/workqueue.h>
    110 #include <sys/atomic.h>
    111 
    112 #include <sys/rndsource.h>
    113 
    114 #include <net/if.h>
    115 #include <net/if_dl.h>
    116 #include <net/if_media.h>
    117 #include <net/if_ether.h>
    118 
    119 #include <net/bpf.h>
    120 
    121 #include <net/rss_config.h>
    122 
    123 #include <netinet/in.h>			/* XXX for struct ip */
    124 #include <netinet/in_systm.h>		/* XXX for struct ip */
    125 #include <netinet/ip.h>			/* XXX for struct ip */
    126 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    127 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    128 
    129 #include <sys/bus.h>
    130 #include <sys/intr.h>
    131 #include <machine/endian.h>
    132 
    133 #include <dev/mii/mii.h>
    134 #include <dev/mii/mdio.h>
    135 #include <dev/mii/miivar.h>
    136 #include <dev/mii/miidevs.h>
    137 #include <dev/mii/mii_bitbang.h>
    138 #include <dev/mii/ikphyreg.h>
    139 #include <dev/mii/igphyreg.h>
    140 #include <dev/mii/igphyvar.h>
    141 #include <dev/mii/inbmphyreg.h>
    142 #include <dev/mii/ihphyreg.h>
    143 #include <dev/mii/makphyreg.h>
    144 
    145 #include <dev/pci/pcireg.h>
    146 #include <dev/pci/pcivar.h>
    147 #include <dev/pci/pcidevs.h>
    148 
    149 #include <dev/pci/if_wmreg.h>
    150 #include <dev/pci/if_wmvar.h>
    151 
    152 #ifdef WM_DEBUG
    153 #define	WM_DEBUG_LINK		__BIT(0)
    154 #define	WM_DEBUG_TX		__BIT(1)
    155 #define	WM_DEBUG_RX		__BIT(2)
    156 #define	WM_DEBUG_GMII		__BIT(3)
    157 #define	WM_DEBUG_MANAGE		__BIT(4)
    158 #define	WM_DEBUG_NVM		__BIT(5)
    159 #define	WM_DEBUG_INIT		__BIT(6)
    160 #define	WM_DEBUG_LOCK		__BIT(7)
    161 
    162 #if 0
    163 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
    164 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
    165 	WM_DEBUG_LOCK
    166 #endif
    167 
    168 #define	DPRINTF(sc, x, y)			  \
    169 	do {					  \
    170 		if ((sc)->sc_debug & (x))	  \
    171 			printf y;		  \
    172 	} while (0)
    173 #else
    174 #define	DPRINTF(sc, x, y)	__nothing
    175 #endif /* WM_DEBUG */
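
/*
 * A minimal usage sketch of DPRINTF() (illustrative, assuming the
 * WM_DEBUG_LINK bit is set in sc->sc_debug): the printf() arguments
 * are passed as a single parenthesized group.
 */
#if 0	/* example only */
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: LINK: status changed\n", device_xname(sc->sc_dev)));
#endif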
    176 
    177 #ifdef NET_MPSAFE
    178 #define WM_MPSAFE	1
    179 #define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
    180 #define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
    181 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    182 #else
    183 #define WM_CALLOUT_FLAGS	0
    184 #define WM_SOFTINT_FLAGS	0
    185 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
    186 #endif
    187 
    188 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    189 
    190 /*
     191  * The maximum number of interrupts supported by this driver.
    192  */
    193 #define WM_MAX_NQUEUEINTR	16
    194 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    195 
    196 #ifndef WM_DISABLE_MSI
    197 #define	WM_DISABLE_MSI 0
    198 #endif
    199 #ifndef WM_DISABLE_MSIX
    200 #define	WM_DISABLE_MSIX 0
    201 #endif
    202 
    203 int wm_disable_msi = WM_DISABLE_MSI;
    204 int wm_disable_msix = WM_DISABLE_MSIX;
    205 
    206 #ifndef WM_WATCHDOG_TIMEOUT
    207 #define WM_WATCHDOG_TIMEOUT 5
    208 #endif
    209 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    210 
    211 /*
    212  * Transmit descriptor list size.  Due to errata, we can only have
    213  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    214  * on >= 82544. We tell the upper layers that they can queue a lot
    215  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    216  * of them at a time.
    217  *
    218  * We allow up to 64 DMA segments per packet.  Pathological packet
    219  * chains containing many small mbufs have been observed in zero-copy
     220  * situations with jumbo frames.  If an mbuf chain has more than 64
     221  * DMA segments, m_defrag() is called to reduce it.
    222  */
    223 #define	WM_NTXSEGS		64
    224 #define	WM_IFQUEUELEN		256
    225 #define	WM_TXQUEUELEN_MAX	64
    226 #define	WM_TXQUEUELEN_MAX_82547	16
    227 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    228 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    229 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    230 #define	WM_NTXDESC_82542	256
    231 #define	WM_NTXDESC_82544	4096
    232 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    233 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    234 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    235 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    236 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
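
/*
 * Worked example: the descriptor and job counts are powers of two, so
 * the wrap is a mask instead of a modulo.  With WM_NTXDESC == 4096:
 *
 *	WM_NEXTTX(txq, 100)  == (100 + 1)  & 4095 == 101
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0
 */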
    237 
    238 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    239 
    240 #define	WM_TXINTERQSIZE		256
    241 
    242 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
    248 
    249 /*
    250  * Receive descriptor list size.  We have one Rx buffer for normal
    251  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    252  * packet.  We allocate 256 receive descriptors, each with a 2k
    253  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    254  */
    255 #define	WM_NRXDESC		256U
    256 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    257 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    258 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
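
/*
 * Worked example (assuming a ~9k jumbo frame): such a frame needs
 * 9014 / 2048 -> 5 MCLBYTES buffers, so 256 descriptors allow about
 * 256 / 5 == 51 full-sized jumbo packets in flight, i.e. on the
 * order of the "room for 50" figure above.
 */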
    259 
    260 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    261 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    262 #endif
    263 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    264 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    265 #endif
    266 
    267 typedef union txdescs {
    268 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    269 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    270 } txdescs_t;
    271 
    272 typedef union rxdescs {
    273 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    274 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    275 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    276 } rxdescs_t;
    277 
    278 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    279 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    280 
    281 /*
    282  * Software state for transmit jobs.
    283  */
    284 struct wm_txsoft {
    285 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    286 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    287 	int txs_firstdesc;		/* first descriptor in packet */
    288 	int txs_lastdesc;		/* last descriptor in packet */
    289 	int txs_ndesc;			/* # of descriptors used */
    290 };
    291 
    292 /*
    293  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    294  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    295  * them together.
    296  */
    297 struct wm_rxsoft {
    298 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    299 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    300 };
    301 
    302 #define WM_LINKUP_TIMEOUT	50
    303 
    304 static uint16_t swfwphysem[] = {
    305 	SWFW_PHY0_SM,
    306 	SWFW_PHY1_SM,
    307 	SWFW_PHY2_SM,
    308 	SWFW_PHY3_SM
    309 };
    310 
    311 static const uint32_t wm_82580_rxpbs_table[] = {
    312 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    313 };
    314 
    315 struct wm_softc;
    316 
    317 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
    318 #if !defined(WM_EVENT_COUNTERS)
    319 #define WM_EVENT_COUNTERS 1
    320 #endif
    321 #endif
    322 
    323 #ifdef WM_EVENT_COUNTERS
    324 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    325 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    326 	struct evcnt qname##_ev_##evname;
    327 
    328 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    329 	do {								\
    330 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    331 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    332 		    "%s%02d%s", #qname, (qnum), #evname);		\
    333 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    334 		    (evtype), NULL, (xname),				\
    335 		    (q)->qname##_##evname##_evcnt_name);		\
    336 	} while (0)
    337 
    338 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    339 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    340 
    341 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    342 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    343 
    344 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    345 	evcnt_detach(&(q)->qname##_ev_##evname);
    346 #endif /* WM_EVENT_COUNTERS */
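
/*
 * A sketch of how the macros expand (illustrative): for qname == txq
 * and evname == tso, WM_Q_EVCNT_DEFINE() declares roughly
 *
 *	char txq_tso_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_tso;
 *
 * and WM_Q_EVCNT_ATTACH() formats a name such as "txq00tso" into the
 * buffer before handing it to evcnt_attach_dynamic().  Note the
 * sizeof() operand is a plain string literal (## is not pasted inside
 * strings), used only to reserve room for "<qname><2-digit queue><evname>".
 */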
    347 
    348 struct wm_txqueue {
    349 	kmutex_t *txq_lock;		/* lock for tx operations */
    350 
    351 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    352 
    353 	/* Software state for the transmit descriptors. */
    354 	int txq_num;			/* must be a power of two */
    355 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    356 
    357 	/* TX control data structures. */
    358 	int txq_ndesc;			/* must be a power of two */
     359 	size_t txq_descsize;		/* size of a Tx descriptor */
    360 	txdescs_t *txq_descs_u;
    361 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    362 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     363 	int txq_desc_rseg;		/* real number of control segments */
    364 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    365 #define	txq_descs	txq_descs_u->sctxu_txdescs
    366 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    367 
    368 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    369 
    370 	int txq_free;			/* number of free Tx descriptors */
    371 	int txq_next;			/* next ready Tx descriptor */
    372 
    373 	int txq_sfree;			/* number of free Tx jobs */
    374 	int txq_snext;			/* next free Tx job */
    375 	int txq_sdirty;			/* dirty Tx jobs */
    376 
    377 	/* These 4 variables are used only on the 82547. */
    378 	int txq_fifo_size;		/* Tx FIFO size */
    379 	int txq_fifo_head;		/* current head of FIFO */
    380 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    381 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    382 
    383 	/*
    384 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     385 	 * CPUs.  This queue mediates between them without blocking.
    386 	 */
    387 	pcq_t *txq_interq;
    388 
    389 	/*
     390 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     391 	 * to manage the Tx H/W queue's busy flag.
    392 	 */
    393 	int txq_flags;			/* flags for H/W queue, see below */
    394 #define	WM_TXQ_NO_SPACE		0x1
    395 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
    396 
    397 	bool txq_stopping;
    398 
    399 	bool txq_sending;
    400 	time_t txq_lastsent;
    401 
    402 	/* Checksum flags used for previous packet */
    403 	uint32_t	txq_last_hw_cmd;
    404 	uint8_t		txq_last_hw_fields;
    405 	uint16_t	txq_last_hw_ipcs;
    406 	uint16_t	txq_last_hw_tucs;
    407 
    408 	uint32_t txq_packets;		/* for AIM */
    409 	uint32_t txq_bytes;		/* for AIM */
    410 #ifdef WM_EVENT_COUNTERS
    411 	/* TX event counters */
    412 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    413 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    414 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    415 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    416 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    417 					    /* XXX not used? */
    418 
    419 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    420 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    421 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    422 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    423 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    424 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    425 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    426 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    427 					    /* other than toomanyseg */
    428 
     429 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
    430 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    431 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
     432 	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */
    433 
    434 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    435 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    436 #endif /* WM_EVENT_COUNTERS */
    437 };
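
/*
 * A minimal sketch of the txq_interq flow (illustrative; the drain
 * helper name below is hypothetical): producers enqueue lock-free
 * with pcq(9), and the queue owner drains under txq_lock.
 */
#if 0	/* example only */
	/* Producer side (any CPU), cf. wm_transmit(): */
	if (!pcq_put(txq->txq_interq, m)) {
		m_freem(m);		/* intermediate queue full: drop */
		return ENOBUFS;
	}

	/* Consumer side, with txq_lock held: */
	while ((m = pcq_get(txq->txq_interq)) != NULL)
		wm_tx_enqueue_one(txq, m);	/* hypothetical helper */
#endif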
    438 
    439 struct wm_rxqueue {
    440 	kmutex_t *rxq_lock;		/* lock for rx operations */
    441 
    442 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    443 
    444 	/* Software state for the receive descriptors. */
    445 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    446 
    447 	/* RX control data structures. */
    448 	int rxq_ndesc;			/* must be a power of two */
     449 	size_t rxq_descsize;		/* size of an Rx descriptor */
    450 	rxdescs_t *rxq_descs_u;
    451 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    452 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     453 	int rxq_desc_rseg;		/* real number of control segments */
    454 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    455 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    456 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    457 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    458 
    459 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    460 
    461 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    462 	int rxq_discard;
    463 	int rxq_len;
    464 	struct mbuf *rxq_head;
    465 	struct mbuf *rxq_tail;
    466 	struct mbuf **rxq_tailp;
    467 
    468 	bool rxq_stopping;
    469 
    470 	uint32_t rxq_packets;		/* for AIM */
    471 	uint32_t rxq_bytes;		/* for AIM */
    472 #ifdef WM_EVENT_COUNTERS
    473 	/* RX event counters */
    474 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    475 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    476 
    477 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    478 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    479 #endif
    480 };
    481 
    482 struct wm_queue {
    483 	int wmq_id;			/* index of TX/RX queues */
     484 	int wmq_intr_idx;		/* index into the MSI-X table */
    485 
    486 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    487 	bool wmq_set_itr;
    488 
    489 	struct wm_txqueue wmq_txq;
    490 	struct wm_rxqueue wmq_rxq;
    491 	char sysctlname[32];		/* Name for sysctl */
    492 
    493 	bool wmq_txrx_use_workqueue;
    494 	struct work wmq_cookie;
    495 	void *wmq_si;
    496 };
    497 
    498 struct wm_phyop {
    499 	int (*acquire)(struct wm_softc *);
    500 	void (*release)(struct wm_softc *);
    501 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    502 	int (*writereg_locked)(device_t, int, int, uint16_t);
    503 	int reset_delay_us;
    504 	bool no_errprint;
    505 };
    506 
    507 struct wm_nvmop {
    508 	int (*acquire)(struct wm_softc *);
    509 	void (*release)(struct wm_softc *);
    510 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    511 };
    512 
    513 /*
    514  * Software state per device.
    515  */
    516 struct wm_softc {
    517 	device_t sc_dev;		/* generic device information */
    518 	bus_space_tag_t sc_st;		/* bus space tag */
    519 	bus_space_handle_t sc_sh;	/* bus space handle */
    520 	bus_size_t sc_ss;		/* bus space size */
    521 	bus_space_tag_t sc_iot;		/* I/O space tag */
    522 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    523 	bus_size_t sc_ios;		/* I/O space size */
    524 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    525 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    526 	bus_size_t sc_flashs;		/* flash registers space size */
    527 	off_t sc_flashreg_offset;	/*
    528 					 * offset to flash registers from
    529 					 * start of BAR
    530 					 */
    531 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    532 
    533 	struct ethercom sc_ethercom;	/* ethernet common data */
    534 	struct mii_data sc_mii;		/* MII/media information */
    535 
    536 	pci_chipset_tag_t sc_pc;
    537 	pcitag_t sc_pcitag;
    538 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    539 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    540 
    541 	uint16_t sc_pcidevid;		/* PCI device ID */
    542 	wm_chip_type sc_type;		/* MAC type */
    543 	int sc_rev;			/* MAC revision */
    544 	wm_phy_type sc_phytype;		/* PHY type */
    545 	uint8_t sc_sfptype;		/* SFP type */
     546 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
    547 #define	WM_MEDIATYPE_UNKNOWN		0x00
    548 #define	WM_MEDIATYPE_FIBER		0x01
    549 #define	WM_MEDIATYPE_COPPER		0x02
    550 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    551 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    552 	int sc_flags;			/* flags; see below */
    553 	u_short sc_if_flags;		/* last if_flags */
    554 	int sc_ec_capenable;		/* last ec_capenable */
    555 	int sc_flowflags;		/* 802.3x flow control flags */
    556 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    557 	int sc_align_tweak;
    558 
    559 	void *sc_ihs[WM_MAX_NINTR];	/*
    560 					 * interrupt cookie.
    561 					 * - legacy and msi use sc_ihs[0] only
    562 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    563 					 */
    564 	pci_intr_handle_t *sc_intrs;	/*
    565 					 * legacy and msi use sc_intrs[0] only
     566 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    567 					 */
    568 	int sc_nintrs;			/* number of interrupts */
    569 
     570 	int sc_link_intr_idx;		/* index into the MSI-X table */
    571 
    572 	callout_t sc_tick_ch;		/* tick callout */
    573 	bool sc_core_stopping;
    574 
    575 	int sc_nvm_ver_major;
    576 	int sc_nvm_ver_minor;
    577 	int sc_nvm_ver_build;
    578 	int sc_nvm_addrbits;		/* NVM address bits */
    579 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    580 	int sc_ich8_flash_base;
    581 	int sc_ich8_flash_bank_size;
    582 	int sc_nvm_k1_enabled;
    583 
    584 	int sc_nqueues;
    585 	struct wm_queue *sc_queue;
    586 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    587 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    588 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    589 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    590 	struct workqueue *sc_queue_wq;
    591 	bool sc_txrx_use_workqueue;
    592 
    593 	int sc_affinity_offset;
    594 
    595 #ifdef WM_EVENT_COUNTERS
    596 	/* Event counters. */
    597 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    598 
    599 	/* WM_T_82542_2_1 only */
    600 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    601 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    602 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    603 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    604 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    605 #endif /* WM_EVENT_COUNTERS */
    606 
    607 	struct sysctllog *sc_sysctllog;
    608 
     609 	/* This variable is used only on the 82547. */
    610 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    611 
    612 	uint32_t sc_ctrl;		/* prototype CTRL register */
    613 #if 0
    614 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    615 #endif
    616 	uint32_t sc_icr;		/* prototype interrupt bits */
    617 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    618 	uint32_t sc_tctl;		/* prototype TCTL register */
    619 	uint32_t sc_rctl;		/* prototype RCTL register */
    620 	uint32_t sc_txcw;		/* prototype TXCW register */
    621 	uint32_t sc_tipg;		/* prototype TIPG register */
    622 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    623 	uint32_t sc_pba;		/* prototype PBA register */
    624 
    625 	int sc_tbi_linkup;		/* TBI link status */
    626 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    627 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    628 
    629 	int sc_mchash_type;		/* multicast filter offset */
    630 
    631 	krndsource_t rnd_source;	/* random source */
    632 
    633 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    634 
    635 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    636 	kmutex_t *sc_ich_phymtx;	/*
    637 					 * 82574/82583/ICH/PCH specific PHY
    638 					 * mutex. For 82574/82583, the mutex
    639 					 * is used for both PHY and NVM.
    640 					 */
    641 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    642 
    643 	struct wm_phyop phy;
    644 	struct wm_nvmop nvm;
    645 #ifdef WM_DEBUG
    646 	uint32_t sc_debug;
    647 #endif
    648 };
    649 
    650 #define WM_CORE_LOCK(_sc)						\
    651 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    652 #define WM_CORE_UNLOCK(_sc)						\
    653 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    654 #define WM_CORE_LOCKED(_sc)						\
    655 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
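
/*
 * A minimal usage sketch (illustrative): sc_core_lock may be NULL in
 * the non-MPSAFE configuration, so the macros degrade to no-ops then.
 */
#if 0	/* example only */
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... touch softc state ... */
	WM_CORE_UNLOCK(sc);
#endif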
    656 
    657 #define	WM_RXCHAIN_RESET(rxq)						\
    658 do {									\
    659 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    660 	*(rxq)->rxq_tailp = NULL;					\
    661 	(rxq)->rxq_len = 0;						\
    662 } while (/*CONSTCOND*/0)
    663 
    664 #define	WM_RXCHAIN_LINK(rxq, m)						\
    665 do {									\
    666 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    667 	(rxq)->rxq_tailp = &(m)->m_next;				\
    668 } while (/*CONSTCOND*/0)
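
/*
 * How the chain macros cooperate (illustrative): rxq_tailp always
 * points at the m_next field of the last mbuf, or at rxq_head when
 * the chain is empty, so appending is O(1):
 */
#if 0	/* example only */
	WM_RXCHAIN_RESET(rxq);		/* head == NULL, tailp == &head */
	WM_RXCHAIN_LINK(rxq, m1);	/* head == m1, tailp == &m1->m_next */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2 */
#endif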
    669 
    670 #ifdef WM_EVENT_COUNTERS
    671 #ifdef __HAVE_ATOMIC64_LOADSTORE
    672 #define	WM_EVCNT_INCR(ev)						\
    673 	atomic_store_relaxed(&((ev)->ev_count),				\
    674 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
    675 #define	WM_EVCNT_ADD(ev, val)						\
    676 	atomic_store_relaxed(&((ev)->ev_count),				\
    677 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
    678 #else
    679 #define	WM_EVCNT_INCR(ev)						\
    680 	((ev)->ev_count)++
    681 #define	WM_EVCNT_ADD(ev, val)						\
    682 	(ev)->ev_count += (val)
    683 #endif
    684 
    685 #define WM_Q_EVCNT_INCR(qname, evname)			\
    686 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    687 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    688 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    689 #else /* !WM_EVENT_COUNTERS */
    690 #define	WM_EVCNT_INCR(ev)	/* nothing */
    691 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    692 
    693 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    694 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    695 #endif /* !WM_EVENT_COUNTERS */
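
/*
 * Rationale sketch (illustrative): with __HAVE_ATOMIC64_LOADSTORE the
 * counters are bumped via relaxed 64-bit load/store so that readers
 * (e.g. vmstat -e) never see a torn value; no memory ordering is
 * implied, and the read-modify-write itself is still unlocked, which
 * is fine as long as each counter is updated under its queue lock.
 */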
    696 
    697 #define	CSR_READ(sc, reg)						\
    698 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    699 #define	CSR_WRITE(sc, reg, val)						\
    700 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    701 #define	CSR_WRITE_FLUSH(sc)						\
    702 	(void)CSR_READ((sc), WMREG_STATUS)
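
/*
 * A minimal sketch of the posted-write flush idiom (illustrative):
 * reading any register (STATUS here) forces preceding PCI writes out
 * to the device before the driver proceeds, e.g. around a delay:
 */
#if 0	/* example only */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);
	delay(10000);		/* reset is really in flight now */
#endif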
    703 
    704 #define ICH8_FLASH_READ32(sc, reg)					\
    705 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    706 	    (reg) + sc->sc_flashreg_offset)
    707 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    708 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    709 	    (reg) + sc->sc_flashreg_offset, (data))
    710 
    711 #define ICH8_FLASH_READ16(sc, reg)					\
    712 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    713 	    (reg) + sc->sc_flashreg_offset)
    714 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    715 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    716 	    (reg) + sc->sc_flashreg_offset, (data))
    717 
    718 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    719 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    720 
    721 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    722 #define	WM_CDTXADDR_HI(txq, x)						\
    723 	(sizeof(bus_addr_t) == 8 ?					\
    724 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    725 
    726 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    727 #define	WM_CDRXADDR_HI(rxq, x)						\
    728 	(sizeof(bus_addr_t) == 8 ?					\
    729 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
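
/*
 * Worked example: descriptor ring addresses are programmed as two
 * 32-bit halves.  With a 64-bit bus_addr_t of 0x0000000123456780,
 * WM_CDTXADDR_LO() yields 0x23456780 and WM_CDTXADDR_HI() yields
 * 0x00000001; with a 32-bit bus_addr_t the HI half is constant 0.
 */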
    730 
    731 /*
    732  * Register read/write functions.
    733  * Other than CSR_{READ|WRITE}().
    734  */
    735 #if 0
    736 static inline uint32_t wm_io_read(struct wm_softc *, int);
    737 #endif
    738 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    739 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    740     uint32_t, uint32_t);
    741 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    742 
    743 /*
    744  * Descriptor sync/init functions.
    745  */
    746 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    747 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    748 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    749 
    750 /*
    751  * Device driver interface functions and commonly used functions.
    752  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    753  */
    754 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    755 static int	wm_match(device_t, cfdata_t, void *);
    756 static void	wm_attach(device_t, device_t, void *);
    757 static int	wm_detach(device_t, int);
    758 static bool	wm_suspend(device_t, const pmf_qual_t *);
    759 static bool	wm_resume(device_t, const pmf_qual_t *);
    760 static void	wm_watchdog(struct ifnet *);
    761 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    762     uint16_t *);
    763 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    764     uint16_t *);
    765 static void	wm_tick(void *);
    766 static int	wm_ifflags_cb(struct ethercom *);
    767 static int	wm_ioctl(struct ifnet *, u_long, void *);
    768 /* MAC address related */
    769 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    770 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    771 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    772 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    773 static int	wm_rar_count(struct wm_softc *);
    774 static void	wm_set_filter(struct wm_softc *);
    775 /* Reset and init related */
    776 static void	wm_set_vlan(struct wm_softc *);
    777 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    778 static void	wm_get_auto_rd_done(struct wm_softc *);
    779 static void	wm_lan_init_done(struct wm_softc *);
    780 static void	wm_get_cfg_done(struct wm_softc *);
    781 static int	wm_phy_post_reset(struct wm_softc *);
    782 static int	wm_write_smbus_addr(struct wm_softc *);
    783 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    784 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    785 static void	wm_initialize_hardware_bits(struct wm_softc *);
    786 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    787 static int	wm_reset_phy(struct wm_softc *);
    788 static void	wm_flush_desc_rings(struct wm_softc *);
    789 static void	wm_reset(struct wm_softc *);
    790 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    791 static void	wm_rxdrain(struct wm_rxqueue *);
    792 static void	wm_init_rss(struct wm_softc *);
    793 static void	wm_adjust_qnum(struct wm_softc *, int);
    794 static inline bool	wm_is_using_msix(struct wm_softc *);
    795 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    796 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
    797 static int	wm_setup_legacy(struct wm_softc *);
    798 static int	wm_setup_msix(struct wm_softc *);
    799 static int	wm_init(struct ifnet *);
    800 static int	wm_init_locked(struct ifnet *);
    801 static void	wm_init_sysctls(struct wm_softc *);
    802 static void	wm_unset_stopping_flags(struct wm_softc *);
    803 static void	wm_set_stopping_flags(struct wm_softc *);
    804 static void	wm_stop(struct ifnet *, int);
    805 static void	wm_stop_locked(struct ifnet *, bool, bool);
    806 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    807 static void	wm_82547_txfifo_stall(void *);
    808 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    809 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    810 /* DMA related */
    811 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    812 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    813 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    814 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    815     struct wm_txqueue *);
    816 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    817 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    818 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    819     struct wm_rxqueue *);
    820 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    821 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    822 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    823 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    824 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    825 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    826 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    827     struct wm_txqueue *);
    828 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    829     struct wm_rxqueue *);
    830 static int	wm_alloc_txrx_queues(struct wm_softc *);
    831 static void	wm_free_txrx_queues(struct wm_softc *);
    832 static int	wm_init_txrx_queues(struct wm_softc *);
    833 /* Start */
    834 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    835     struct wm_txsoft *, uint32_t *, uint8_t *);
    836 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    837 static void	wm_start(struct ifnet *);
    838 static void	wm_start_locked(struct ifnet *);
    839 static int	wm_transmit(struct ifnet *, struct mbuf *);
    840 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    841 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    842 		    bool);
    843 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    844     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    845 static void	wm_nq_start(struct ifnet *);
    846 static void	wm_nq_start_locked(struct ifnet *);
    847 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    848 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    849 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    850 		    bool);
    851 static void	wm_deferred_start_locked(struct wm_txqueue *);
    852 static void	wm_handle_queue(void *);
    853 static void	wm_handle_queue_work(struct work *, void *);
    854 /* Interrupt */
    855 static bool	wm_txeof(struct wm_txqueue *, u_int);
    856 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    857 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    858 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    859 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    860 static void	wm_linkintr(struct wm_softc *, uint32_t);
    861 static int	wm_intr_legacy(void *);
    862 static inline void	wm_txrxintr_disable(struct wm_queue *);
    863 static inline void	wm_txrxintr_enable(struct wm_queue *);
    864 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    865 static int	wm_txrxintr_msix(void *);
    866 static int	wm_linkintr_msix(void *);
    867 
    868 /*
    869  * Media related.
    870  * GMII, SGMII, TBI, SERDES and SFP.
    871  */
    872 /* Common */
    873 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    874 /* GMII related */
    875 static void	wm_gmii_reset(struct wm_softc *);
    876 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    877 static int	wm_get_phy_id_82575(struct wm_softc *);
    878 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    879 static int	wm_gmii_mediachange(struct ifnet *);
    880 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    881 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    882 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    883 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    884 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    885 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    886 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    887 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    888 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    889 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    890 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    891 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    892 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    893 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    894 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    895 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    896 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    897 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    898 	bool);
    899 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    900 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    901 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    902 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    903 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    904 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    905 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    906 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    907 static void	wm_gmii_statchg(struct ifnet *);
    908 /*
     909  * Kumeran related (80003, ICH* and PCH*).
     910  * These functions are not for accessing MII registers but for accessing
     911  * Kumeran-specific registers.
    912  */
    913 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    914 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    915 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    916 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    917 /* EMI register related */
    918 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    919 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    920 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    921 /* SGMII */
    922 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    923 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
    924 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    925 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    926 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    927 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    928 /* TBI related */
    929 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    930 static void	wm_tbi_mediainit(struct wm_softc *);
    931 static int	wm_tbi_mediachange(struct ifnet *);
    932 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    933 static int	wm_check_for_link(struct wm_softc *);
    934 static void	wm_tbi_tick(struct wm_softc *);
    935 /* SERDES related */
    936 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    937 static int	wm_serdes_mediachange(struct ifnet *);
    938 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    939 static void	wm_serdes_tick(struct wm_softc *);
    940 /* SFP related */
    941 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    942 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    943 
    944 /*
    945  * NVM related.
    946  * Microwire, SPI (w/wo EERD) and Flash.
    947  */
    948 /* Misc functions */
    949 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    950 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    951 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    952 /* Microwire */
    953 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    954 /* SPI */
    955 static int	wm_nvm_ready_spi(struct wm_softc *);
    956 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    957 /* Using with EERD */
    958 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    959 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    960 /* Flash */
    961 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    962     unsigned int *);
    963 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    964 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    965 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    966     uint32_t *);
    967 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    968 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    969 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    970 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    971 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    972 /* iNVM */
    973 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    974 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    975 /* Lock, detecting NVM type, validate checksum and read */
    976 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    977 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    978 static int	wm_nvm_validate_checksum(struct wm_softc *);
    979 static void	wm_nvm_version_invm(struct wm_softc *);
    980 static void	wm_nvm_version(struct wm_softc *);
    981 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    982 
    983 /*
    984  * Hardware semaphores.
     985  * Very complex...
    986  */
    987 static int	wm_get_null(struct wm_softc *);
    988 static void	wm_put_null(struct wm_softc *);
    989 static int	wm_get_eecd(struct wm_softc *);
    990 static void	wm_put_eecd(struct wm_softc *);
    991 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    992 static void	wm_put_swsm_semaphore(struct wm_softc *);
    993 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    994 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    995 static int	wm_get_nvm_80003(struct wm_softc *);
    996 static void	wm_put_nvm_80003(struct wm_softc *);
    997 static int	wm_get_nvm_82571(struct wm_softc *);
    998 static void	wm_put_nvm_82571(struct wm_softc *);
    999 static int	wm_get_phy_82575(struct wm_softc *);
   1000 static void	wm_put_phy_82575(struct wm_softc *);
   1001 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
   1002 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
   1003 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
   1004 static void	wm_put_swflag_ich8lan(struct wm_softc *);
   1005 static int	wm_get_nvm_ich8lan(struct wm_softc *);
   1006 static void	wm_put_nvm_ich8lan(struct wm_softc *);
   1007 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
   1008 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
   1009 
   1010 /*
   1011  * Management mode and power management related subroutines.
   1012  * BMC, AMT, suspend/resume and EEE.
   1013  */
   1014 #if 0
   1015 static int	wm_check_mng_mode(struct wm_softc *);
   1016 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
   1017 static int	wm_check_mng_mode_82574(struct wm_softc *);
   1018 static int	wm_check_mng_mode_generic(struct wm_softc *);
   1019 #endif
   1020 static int	wm_enable_mng_pass_thru(struct wm_softc *);
   1021 static bool	wm_phy_resetisblocked(struct wm_softc *);
   1022 static void	wm_get_hw_control(struct wm_softc *);
   1023 static void	wm_release_hw_control(struct wm_softc *);
   1024 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
   1025 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
   1026 static void	wm_init_manageability(struct wm_softc *);
   1027 static void	wm_release_manageability(struct wm_softc *);
   1028 static void	wm_get_wakeup(struct wm_softc *);
   1029 static int	wm_ulp_disable(struct wm_softc *);
   1030 static int	wm_enable_phy_wakeup(struct wm_softc *);
   1031 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
   1032 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
   1033 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
   1034 static void	wm_enable_wakeup(struct wm_softc *);
   1035 static void	wm_disable_aspm(struct wm_softc *);
   1036 /* LPLU (Low Power Link Up) */
   1037 static void	wm_lplu_d0_disable(struct wm_softc *);
   1038 /* EEE */
   1039 static int	wm_set_eee_i350(struct wm_softc *);
   1040 static int	wm_set_eee_pchlan(struct wm_softc *);
   1041 static int	wm_set_eee(struct wm_softc *);
   1042 
   1043 /*
   1044  * Workarounds (mainly PHY related).
    1045  * Basically, PHY workarounds are in the PHY drivers.
   1046  */
   1047 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1048 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1049 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1050 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1051 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
   1052 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
   1053 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1054 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1055 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1056 static int	wm_k1_workaround_lv(struct wm_softc *);
   1057 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1058 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1059 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1060 static void	wm_reset_init_script_82575(struct wm_softc *);
   1061 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1062 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1063 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1064 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1065 static int	wm_pll_workaround_i210(struct wm_softc *);
   1066 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1067 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
   1068 static void	wm_set_linkdown_discard(struct wm_softc *);
   1069 static void	wm_clear_linkdown_discard(struct wm_softc *);
   1070 
   1071 #ifdef WM_DEBUG
   1072 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
   1073 #endif
   1074 
   1075 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1076     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1077 
   1078 /*
   1079  * Devices supported by this driver.
   1080  */
   1081 static const struct wm_product {
   1082 	pci_vendor_id_t		wmp_vendor;
   1083 	pci_product_id_t	wmp_product;
   1084 	const char		*wmp_name;
   1085 	wm_chip_type		wmp_type;
   1086 	uint32_t		wmp_flags;
   1087 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1088 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1089 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1090 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1091 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1092 } wm_products[] = {
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1094 	  "Intel i82542 1000BASE-X Ethernet",
   1095 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1096 
   1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1098 	  "Intel i82543GC 1000BASE-X Ethernet",
   1099 	  WM_T_82543,		WMP_F_FIBER },
   1100 
   1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1102 	  "Intel i82543GC 1000BASE-T Ethernet",
   1103 	  WM_T_82543,		WMP_F_COPPER },
   1104 
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1106 	  "Intel i82544EI 1000BASE-T Ethernet",
   1107 	  WM_T_82544,		WMP_F_COPPER },
   1108 
   1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1110 	  "Intel i82544EI 1000BASE-X Ethernet",
   1111 	  WM_T_82544,		WMP_F_FIBER },
   1112 
   1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1114 	  "Intel i82544GC 1000BASE-T Ethernet",
   1115 	  WM_T_82544,		WMP_F_COPPER },
   1116 
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1118 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1119 	  WM_T_82544,		WMP_F_COPPER },
   1120 
   1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1122 	  "Intel i82540EM 1000BASE-T Ethernet",
   1123 	  WM_T_82540,		WMP_F_COPPER },
   1124 
   1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1126 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1127 	  WM_T_82540,		WMP_F_COPPER },
   1128 
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1130 	  "Intel i82540EP 1000BASE-T Ethernet",
   1131 	  WM_T_82540,		WMP_F_COPPER },
   1132 
   1133 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1134 	  "Intel i82540EP 1000BASE-T Ethernet",
   1135 	  WM_T_82540,		WMP_F_COPPER },
   1136 
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1138 	  "Intel i82540EP 1000BASE-T Ethernet",
   1139 	  WM_T_82540,		WMP_F_COPPER },
   1140 
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1142 	  "Intel i82545EM 1000BASE-T Ethernet",
   1143 	  WM_T_82545,		WMP_F_COPPER },
   1144 
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1146 	  "Intel i82545GM 1000BASE-T Ethernet",
   1147 	  WM_T_82545_3,		WMP_F_COPPER },
   1148 
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1150 	  "Intel i82545GM 1000BASE-X Ethernet",
   1151 	  WM_T_82545_3,		WMP_F_FIBER },
   1152 
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1154 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1155 	  WM_T_82545_3,		WMP_F_SERDES },
   1156 
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1158 	  "Intel i82546EB 1000BASE-T Ethernet",
   1159 	  WM_T_82546,		WMP_F_COPPER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1162 	  "Intel i82546EB 1000BASE-T Ethernet",
   1163 	  WM_T_82546,		WMP_F_COPPER },
   1164 
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1166 	  "Intel i82545EM 1000BASE-X Ethernet",
   1167 	  WM_T_82545,		WMP_F_FIBER },
   1168 
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1170 	  "Intel i82546EB 1000BASE-X Ethernet",
   1171 	  WM_T_82546,		WMP_F_FIBER },
   1172 
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1174 	  "Intel i82546GB 1000BASE-T Ethernet",
   1175 	  WM_T_82546_3,		WMP_F_COPPER },
   1176 
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1178 	  "Intel i82546GB 1000BASE-X Ethernet",
   1179 	  WM_T_82546_3,		WMP_F_FIBER },
   1180 
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1182 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1183 	  WM_T_82546_3,		WMP_F_SERDES },
   1184 
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1186 	  "i82546GB quad-port Gigabit Ethernet",
   1187 	  WM_T_82546_3,		WMP_F_COPPER },
   1188 
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1190 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1191 	  WM_T_82546_3,		WMP_F_COPPER },
   1192 
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1194 	  "Intel PRO/1000MT (82546GB)",
   1195 	  WM_T_82546_3,		WMP_F_COPPER },
   1196 
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1198 	  "Intel i82541EI 1000BASE-T Ethernet",
   1199 	  WM_T_82541,		WMP_F_COPPER },
   1200 
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1202 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1203 	  WM_T_82541,		WMP_F_COPPER },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1206 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1207 	  WM_T_82541,		WMP_F_COPPER },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1210 	  "Intel i82541ER 1000BASE-T Ethernet",
   1211 	  WM_T_82541_2,		WMP_F_COPPER },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1214 	  "Intel i82541GI 1000BASE-T Ethernet",
   1215 	  WM_T_82541_2,		WMP_F_COPPER },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1218 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1219 	  WM_T_82541_2,		WMP_F_COPPER },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1222 	  "Intel i82541PI 1000BASE-T Ethernet",
   1223 	  WM_T_82541_2,		WMP_F_COPPER },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1226 	  "Intel i82547EI 1000BASE-T Ethernet",
   1227 	  WM_T_82547,		WMP_F_COPPER },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1230 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1231 	  WM_T_82547,		WMP_F_COPPER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1234 	  "Intel i82547GI 1000BASE-T Ethernet",
   1235 	  WM_T_82547_2,		WMP_F_COPPER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1238 	  "Intel PRO/1000 PT (82571EB)",
   1239 	  WM_T_82571,		WMP_F_COPPER },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1242 	  "Intel PRO/1000 PF (82571EB)",
   1243 	  WM_T_82571,		WMP_F_FIBER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1246 	  "Intel PRO/1000 PB (82571EB)",
   1247 	  WM_T_82571,		WMP_F_SERDES },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1250 	  "Intel PRO/1000 QT (82571EB)",
   1251 	  WM_T_82571,		WMP_F_COPPER },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1254 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1255 	  WM_T_82571,		WMP_F_COPPER },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1258 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1259 	  WM_T_82571,		WMP_F_COPPER },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1262 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1263 	  WM_T_82571,		WMP_F_SERDES },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1266 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1267 	  WM_T_82571,		WMP_F_SERDES },
   1268 
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1270 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1271 	  WM_T_82571,		WMP_F_FIBER },
   1272 
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1274 	  "Intel i82572EI 1000baseT Ethernet",
   1275 	  WM_T_82572,		WMP_F_COPPER },
   1276 
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1278 	  "Intel i82572EI 1000baseX Ethernet",
   1279 	  WM_T_82572,		WMP_F_FIBER },
   1280 
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1282 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1283 	  WM_T_82572,		WMP_F_SERDES },
   1284 
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1286 	  "Intel i82572EI 1000baseT Ethernet",
   1287 	  WM_T_82572,		WMP_F_COPPER },
   1288 
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1290 	  "Intel i82573E",
   1291 	  WM_T_82573,		WMP_F_COPPER },
   1292 
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1294 	  "Intel i82573E IAMT",
   1295 	  WM_T_82573,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1298 	  "Intel i82573L Gigabit Ethernet",
   1299 	  WM_T_82573,		WMP_F_COPPER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1302 	  "Intel i82574L",
   1303 	  WM_T_82574,		WMP_F_COPPER },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1306 	  "Intel i82574L",
   1307 	  WM_T_82574,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1310 	  "Intel i82583V",
   1311 	  WM_T_82583,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1314 	  "i80003 dual 1000baseT Ethernet",
   1315 	  WM_T_80003,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1318 	  "i80003 dual 1000baseX Ethernet",
   1319 	  WM_T_80003,		WMP_F_COPPER },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1322 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1323 	  WM_T_80003,		WMP_F_SERDES },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1326 	  "Intel i80003 1000baseT Ethernet",
   1327 	  WM_T_80003,		WMP_F_COPPER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1330 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1331 	  WM_T_80003,		WMP_F_SERDES },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1334 	  "Intel i82801H (M_AMT) LAN Controller",
   1335 	  WM_T_ICH8,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1337 	  "Intel i82801H (AMT) LAN Controller",
   1338 	  WM_T_ICH8,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1340 	  "Intel i82801H LAN Controller",
   1341 	  WM_T_ICH8,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1343 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1344 	  WM_T_ICH8,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1346 	  "Intel i82801H (M) LAN Controller",
   1347 	  WM_T_ICH8,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1349 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1350 	  WM_T_ICH8,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1352 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1353 	  WM_T_ICH8,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1355 	  "82567V-3 LAN Controller",
   1356 	  WM_T_ICH8,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1358 	  "82801I (AMT) LAN Controller",
   1359 	  WM_T_ICH9,		WMP_F_COPPER },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1361 	  "82801I 10/100 LAN Controller",
   1362 	  WM_T_ICH9,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1364 	  "82801I (G) 10/100 LAN Controller",
   1365 	  WM_T_ICH9,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1367 	  "82801I (GT) 10/100 LAN Controller",
   1368 	  WM_T_ICH9,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1370 	  "82801I (C) LAN Controller",
   1371 	  WM_T_ICH9,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1373 	  "82801I mobile LAN Controller",
   1374 	  WM_T_ICH9,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1376 	  "82801I mobile (V) LAN Controller",
   1377 	  WM_T_ICH9,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1379 	  "82801I mobile (AMT) LAN Controller",
   1380 	  WM_T_ICH9,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1382 	  "82567LM-4 LAN Controller",
   1383 	  WM_T_ICH9,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1385 	  "82567LM-2 LAN Controller",
   1386 	  WM_T_ICH10,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1388 	  "82567LF-2 LAN Controller",
   1389 	  WM_T_ICH10,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1391 	  "82567LM-3 LAN Controller",
   1392 	  WM_T_ICH10,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1394 	  "82567LF-3 LAN Controller",
   1395 	  WM_T_ICH10,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1397 	  "82567V-2 LAN Controller",
   1398 	  WM_T_ICH10,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1400 	  "82567V-3? LAN Controller",
   1401 	  WM_T_ICH10,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1403 	  "HANKSVILLE LAN Controller",
   1404 	  WM_T_ICH10,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1406 	  "PCH LAN (82577LM) Controller",
   1407 	  WM_T_PCH,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1409 	  "PCH LAN (82577LC) Controller",
   1410 	  WM_T_PCH,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1412 	  "PCH LAN (82578DM) Controller",
   1413 	  WM_T_PCH,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1415 	  "PCH LAN (82578DC) Controller",
   1416 	  WM_T_PCH,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1418 	  "PCH2 LAN (82579LM) Controller",
   1419 	  WM_T_PCH2,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1421 	  "PCH2 LAN (82579V) Controller",
   1422 	  WM_T_PCH2,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1424 	  "82575EB dual-1000baseT Ethernet",
   1425 	  WM_T_82575,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1427 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1428 	  WM_T_82575,		WMP_F_SERDES },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1430 	  "82575GB quad-1000baseT Ethernet",
   1431 	  WM_T_82575,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1433 	  "82575GB quad-1000baseT Ethernet (PM)",
   1434 	  WM_T_82575,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1436 	  "82576 1000BaseT Ethernet",
   1437 	  WM_T_82576,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1439 	  "82576 1000BaseX Ethernet",
   1440 	  WM_T_82576,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1443 	  "82576 gigabit Ethernet (SERDES)",
   1444 	  WM_T_82576,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1447 	  "82576 quad-1000BaseT Ethernet",
   1448 	  WM_T_82576,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1451 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1452 	  WM_T_82576,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1455 	  "82576 gigabit Ethernet",
   1456 	  WM_T_82576,		WMP_F_COPPER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1459 	  "82576 gigabit Ethernet (SERDES)",
   1460 	  WM_T_82576,		WMP_F_SERDES },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1462 	  "82576 quad-gigabit Ethernet (SERDES)",
   1463 	  WM_T_82576,		WMP_F_SERDES },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1466 	  "82580 1000BaseT Ethernet",
   1467 	  WM_T_82580,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1469 	  "82580 1000BaseX Ethernet",
   1470 	  WM_T_82580,		WMP_F_FIBER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1473 	  "82580 1000BaseT Ethernet (SERDES)",
   1474 	  WM_T_82580,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1477 	  "82580 gigabit Ethernet (SGMII)",
   1478 	  WM_T_82580,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1480 	  "82580 dual-1000BaseT Ethernet",
   1481 	  WM_T_82580,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1484 	  "82580 quad-1000BaseX Ethernet",
   1485 	  WM_T_82580,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1488 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1489 	  WM_T_82580,		WMP_F_COPPER },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1492 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1493 	  WM_T_82580,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1496 	  "DH89XXCC 1000BASE-KX Ethernet",
   1497 	  WM_T_82580,		WMP_F_SERDES },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1500 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1501 	  WM_T_82580,		WMP_F_SERDES },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1504 	  "I350 Gigabit Network Connection",
   1505 	  WM_T_I350,		WMP_F_COPPER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1508 	  "I350 Gigabit Fiber Network Connection",
   1509 	  WM_T_I350,		WMP_F_FIBER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1512 	  "I350 Gigabit Backplane Connection",
   1513 	  WM_T_I350,		WMP_F_SERDES },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1516 	  "I350 Quad Port Gigabit Ethernet",
   1517 	  WM_T_I350,		WMP_F_SERDES },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1520 	  "I350 Gigabit Connection",
   1521 	  WM_T_I350,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1524 	  "I354 Gigabit Ethernet (KX)",
   1525 	  WM_T_I354,		WMP_F_SERDES },
   1526 
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1528 	  "I354 Gigabit Ethernet (SGMII)",
   1529 	  WM_T_I354,		WMP_F_COPPER },
   1530 
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1532 	  "I354 Gigabit Ethernet (2.5G)",
   1533 	  WM_T_I354,		WMP_F_COPPER },
   1534 
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1536 	  "I210-T1 Ethernet Server Adapter",
   1537 	  WM_T_I210,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1540 	  "I210 Ethernet (Copper OEM)",
   1541 	  WM_T_I210,		WMP_F_COPPER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1544 	  "I210 Ethernet (Copper IT)",
   1545 	  WM_T_I210,		WMP_F_COPPER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1548 	  "I210 Ethernet (Copper, FLASH less)",
   1549 	  WM_T_I210,		WMP_F_COPPER },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1552 	  "I210 Gigabit Ethernet (Fiber)",
   1553 	  WM_T_I210,		WMP_F_FIBER },
   1554 
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1556 	  "I210 Gigabit Ethernet (SERDES)",
   1557 	  WM_T_I210,		WMP_F_SERDES },
   1558 
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1560 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1561 	  WM_T_I210,		WMP_F_SERDES },
   1562 
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1564 	  "I210 Gigabit Ethernet (SGMII)",
   1565 	  WM_T_I210,		WMP_F_COPPER },
   1566 
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1568 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1569 	  WM_T_I210,		WMP_F_COPPER },
   1570 
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1572 	  "I211 Ethernet (COPPER)",
   1573 	  WM_T_I211,		WMP_F_COPPER },
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1575 	  "I217 V Ethernet Connection",
   1576 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1577 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1578 	  "I217 LM Ethernet Connection",
   1579 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1581 	  "I218 V Ethernet Connection",
   1582 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1584 	  "I218 V Ethernet Connection",
   1585 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1587 	  "I218 V Ethernet Connection",
   1588 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1589 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1590 	  "I218 LM Ethernet Connection",
   1591 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1593 	  "I218 LM Ethernet Connection",
   1594 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1596 	  "I218 LM Ethernet Connection",
   1597 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1599 	  "I219 LM Ethernet Connection",
   1600 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1602 	  "I219 LM (2) Ethernet Connection",
   1603 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1605 	  "I219 LM (3) Ethernet Connection",
   1606 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1608 	  "I219 LM (4) Ethernet Connection",
   1609 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1611 	  "I219 LM (5) Ethernet Connection",
   1612 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1614 	  "I219 LM (6) Ethernet Connection",
   1615 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1617 	  "I219 LM (7) Ethernet Connection",
   1618 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1620 	  "I219 LM (8) Ethernet Connection",
   1621 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1623 	  "I219 LM (9) Ethernet Connection",
   1624 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1626 	  "I219 LM (10) Ethernet Connection",
   1627 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1629 	  "I219 LM (11) Ethernet Connection",
   1630 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1632 	  "I219 LM (12) Ethernet Connection",
   1633 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1635 	  "I219 LM (13) Ethernet Connection",
   1636 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1638 	  "I219 LM (14) Ethernet Connection",
   1639 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1641 	  "I219 LM (15) Ethernet Connection",
   1642 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1644 	  "I219 V Ethernet Connection",
   1645 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1647 	  "I219 V (2) Ethernet Connection",
   1648 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1650 	  "I219 V (4) Ethernet Connection",
   1651 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1653 	  "I219 V (5) Ethernet Connection",
   1654 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1656 	  "I219 V (6) Ethernet Connection",
   1657 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1659 	  "I219 V (7) Ethernet Connection",
   1660 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1662 	  "I219 V (8) Ethernet Connection",
   1663 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1665 	  "I219 V (9) Ethernet Connection",
   1666 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1668 	  "I219 V (10) Ethernet Connection",
   1669 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1671 	  "I219 V (11) Ethernet Connection",
   1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1674 	  "I219 V (12) Ethernet Connection",
   1675 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1677 	  "I219 V (13) Ethernet Connection",
   1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1680 	  "I219 V (14) Ethernet Connection",
   1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
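         	/* End-of-table sentinel; wm_lookup() stops at the NULL name. */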
   1682 	{ 0,			0,
   1683 	  NULL,
   1684 	  0,			0 },
   1685 };
   1686 
   1687 /*
   1688  * Register read/write functions.
   1689  * Other than CSR_{READ|WRITE}().
   1690  */
   1691 
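         /*
          * A descriptive note on the helpers below: they use the indirect
          * I/O window exposed through the device's I/O BAR, where the
          * register offset is written at BAR offset 0 (the address
          * register) and the value is then transferred through BAR
          * offset 4 (the data register).
          */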
   1692 #if 0 /* Not currently used */
   1693 static inline uint32_t
   1694 wm_io_read(struct wm_softc *sc, int reg)
   1695 {
   1696 
   1697 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1698 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1699 }
   1700 #endif
   1701 
   1702 static inline void
   1703 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1704 {
   1705 
   1706 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1707 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1708 }
   1709 
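         /*
          * Write one byte-wide field of an 82575 8-bit controller register
          * (an SCTL-style register, judging from the SCTL_CTL_* constants)
          * and poll until the hardware reports the write complete.
          */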
   1710 static inline void
   1711 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1712     uint32_t data)
   1713 {
   1714 	uint32_t regval;
   1715 	int i;
   1716 
   1717 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1718 
   1719 	CSR_WRITE(sc, reg, regval);
   1720 
   1721 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1722 		delay(5);
   1723 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1724 			break;
   1725 	}
   1726 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1727 		aprint_error("%s: WARNING:"
   1728 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1729 		    device_xname(sc->sc_dev), reg);
   1730 	}
   1731 }
   1732 
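         /*
          * Split a bus address into the low/high 32-bit halves of a
          * descriptor address field, stored little-endian; the high half
          * is zero when bus_addr_t is only 32 bits wide.
          */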
   1733 static inline void
   1734 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1735 {
   1736 	wa->wa_low = htole32(v & 0xffffffffU);
   1737 	if (sizeof(bus_addr_t) == 8)
   1738 		wa->wa_high = htole32((uint64_t) v >> 32);
   1739 	else
   1740 		wa->wa_high = 0;
   1741 }
   1742 
   1743 /*
   1744  * Descriptor sync/init functions.
   1745  */
   1746 static inline void
   1747 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1748 {
   1749 	struct wm_softc *sc = txq->txq_sc;
   1750 
   1751 	/* If it will wrap around, sync to the end of the ring. */
   1752 	if ((start + num) > WM_NTXDESC(txq)) {
   1753 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1754 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1755 		    (WM_NTXDESC(txq) - start), ops);
   1756 		num -= (WM_NTXDESC(txq) - start);
   1757 		start = 0;
   1758 	}
   1759 
   1760 	/* Now sync whatever is left. */
   1761 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1762 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1763 }
   1764 
   1765 static inline void
   1766 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1767 {
   1768 	struct wm_softc *sc = rxq->rxq_sc;
   1769 
   1770 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1771 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1772 }
   1773 
   1774 static inline void
   1775 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1776 {
   1777 	struct wm_softc *sc = rxq->rxq_sc;
   1778 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1779 	struct mbuf *m = rxs->rxs_mbuf;
   1780 
   1781 	/*
   1782 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1783 	 * so that the payload after the Ethernet header is aligned
   1784 	 * to a 4-byte boundary.
    1785 	 *
   1786 	 * XXX BRAINDAMAGE ALERT!
   1787 	 * The stupid chip uses the same size for every buffer, which
   1788 	 * is set in the Receive Control register.  We are using the 2K
   1789 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1790 	 * reason, we can't "scoot" packets longer than the standard
   1791 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1792 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1793 	 * the upper layer copy the headers.
   1794 	 */
   1795 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1796 
   1797 	if (sc->sc_type == WM_T_82574) {
   1798 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1799 		rxd->erx_data.erxd_addr =
   1800 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1801 		rxd->erx_data.erxd_dd = 0;
   1802 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1803 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1804 
   1805 		rxd->nqrx_data.nrxd_paddr =
   1806 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1807 		/* Currently, split header is not supported. */
   1808 		rxd->nqrx_data.nrxd_haddr = 0;
   1809 	} else {
   1810 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1811 
   1812 		wm_set_dma_addr(&rxd->wrx_addr,
   1813 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1814 		rxd->wrx_len = 0;
   1815 		rxd->wrx_cksum = 0;
   1816 		rxd->wrx_status = 0;
   1817 		rxd->wrx_errors = 0;
   1818 		rxd->wrx_special = 0;
   1819 	}
   1820 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1821 
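         	/* Writing the RX descriptor tail (RDT) hands the slot to the chip. */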
   1822 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1823 }
   1824 
   1825 /*
   1826  * Device driver interface functions and commonly used functions.
   1827  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1828  */
   1829 
   1830 /* Lookup supported device table */
   1831 static const struct wm_product *
   1832 wm_lookup(const struct pci_attach_args *pa)
   1833 {
   1834 	const struct wm_product *wmp;
   1835 
   1836 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1837 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1838 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1839 			return wmp;
   1840 	}
   1841 	return NULL;
   1842 }
   1843 
   1844 /* The match function (ca_match) */
   1845 static int
   1846 wm_match(device_t parent, cfdata_t cf, void *aux)
   1847 {
   1848 	struct pci_attach_args *pa = aux;
   1849 
   1850 	if (wm_lookup(pa) != NULL)
   1851 		return 1;
   1852 
   1853 	return 0;
   1854 }
   1855 
   1856 /* The attach function (ca_attach) */
   1857 static void
   1858 wm_attach(device_t parent, device_t self, void *aux)
   1859 {
   1860 	struct wm_softc *sc = device_private(self);
   1861 	struct pci_attach_args *pa = aux;
   1862 	prop_dictionary_t dict;
   1863 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1864 	pci_chipset_tag_t pc = pa->pa_pc;
   1865 	int counts[PCI_INTR_TYPE_SIZE];
   1866 	pci_intr_type_t max_type;
   1867 	const char *eetype, *xname;
   1868 	bus_space_tag_t memt;
   1869 	bus_space_handle_t memh;
   1870 	bus_size_t memsize;
   1871 	int memh_valid;
   1872 	int i, error;
   1873 	const struct wm_product *wmp;
   1874 	prop_data_t ea;
   1875 	prop_number_t pn;
   1876 	uint8_t enaddr[ETHER_ADDR_LEN];
   1877 	char buf[256];
   1878 	char wqname[MAXCOMLEN];
   1879 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1880 	pcireg_t preg, memtype;
   1881 	uint16_t eeprom_data, apme_mask;
   1882 	bool force_clear_smbi;
   1883 	uint32_t link_mode;
   1884 	uint32_t reg;
   1885 
   1886 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1887 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1888 #endif
   1889 	sc->sc_dev = self;
   1890 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1891 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1892 	sc->sc_core_stopping = false;
   1893 
   1894 	wmp = wm_lookup(pa);
   1895 #ifdef DIAGNOSTIC
   1896 	if (wmp == NULL) {
   1897 		printf("\n");
   1898 		panic("wm_attach: impossible");
   1899 	}
   1900 #endif
   1901 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1902 
   1903 	sc->sc_pc = pa->pa_pc;
   1904 	sc->sc_pcitag = pa->pa_tag;
   1905 
   1906 	if (pci_dma64_available(pa))
   1907 		sc->sc_dmat = pa->pa_dmat64;
   1908 	else
   1909 		sc->sc_dmat = pa->pa_dmat;
   1910 
   1911 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1912 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1913 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1914 
   1915 	sc->sc_type = wmp->wmp_type;
   1916 
   1917 	/* Set default function pointers */
   1918 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1919 	sc->phy.release = sc->nvm.release = wm_put_null;
   1920 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1921 
   1922 	if (sc->sc_type < WM_T_82543) {
   1923 		if (sc->sc_rev < 2) {
   1924 			aprint_error_dev(sc->sc_dev,
   1925 			    "i82542 must be at least rev. 2\n");
   1926 			return;
   1927 		}
   1928 		if (sc->sc_rev < 3)
   1929 			sc->sc_type = WM_T_82542_2_0;
   1930 	}
   1931 
   1932 	/*
   1933 	 * Disable MSI for Errata:
   1934 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1935 	 *
   1936 	 *  82544: Errata 25
   1937 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1938 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1939 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1940 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1941 	 *
   1942 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1943 	 *
   1944 	 *  82571 & 82572: Errata 63
   1945 	 */
   1946 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1947 	    || (sc->sc_type == WM_T_82572))
   1948 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1949 
   1950 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1951 	    || (sc->sc_type == WM_T_82580)
   1952 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1953 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1954 		sc->sc_flags |= WM_F_NEWQUEUE;
   1955 
   1956 	/* Set device properties (mactype) */
   1957 	dict = device_properties(sc->sc_dev);
   1958 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1959 
   1960 	/*
    1961 	 * Map the device.  All devices support memory-mapped access,
   1962 	 * and it is really required for normal operation.
   1963 	 */
   1964 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1965 	switch (memtype) {
   1966 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1967 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1968 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1969 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1970 		break;
   1971 	default:
   1972 		memh_valid = 0;
   1973 		break;
   1974 	}
   1975 
   1976 	if (memh_valid) {
   1977 		sc->sc_st = memt;
   1978 		sc->sc_sh = memh;
   1979 		sc->sc_ss = memsize;
   1980 	} else {
   1981 		aprint_error_dev(sc->sc_dev,
   1982 		    "unable to map device registers\n");
   1983 		return;
   1984 	}
   1985 
   1986 	/*
   1987 	 * In addition, i82544 and later support I/O mapped indirect
   1988 	 * register access.  It is not desirable (nor supported in
   1989 	 * this driver) to use it for normal operation, though it is
   1990 	 * required to work around bugs in some chip versions.
   1991 	 */
   1992 	if (sc->sc_type >= WM_T_82544) {
   1993 		/* First we have to find the I/O BAR. */
   1994 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1995 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1996 			if (memtype == PCI_MAPREG_TYPE_IO)
   1997 				break;
   1998 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1999 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2000 				i += 4;	/* skip high bits, too */
   2001 		}
   2002 		if (i < PCI_MAPREG_END) {
   2003 			/*
    2004 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2005 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO,
    2006 			 * but that's no problem because the newer chips
    2007 			 * don't have this bug.
    2008 			 *
    2009 			 * The i8254x apparently doesn't respond when the
    2010 			 * I/O BAR is 0, which makes it look as if it
    2011 			 * hasn't been configured.
   2012 			 */
   2013 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2014 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2015 				aprint_error_dev(sc->sc_dev,
   2016 				    "WARNING: I/O BAR at zero.\n");
   2017 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2018 					0, &sc->sc_iot, &sc->sc_ioh,
   2019 					NULL, &sc->sc_ios) == 0) {
   2020 				sc->sc_flags |= WM_F_IOH_VALID;
   2021 			} else
   2022 				aprint_error_dev(sc->sc_dev,
   2023 				    "WARNING: unable to map I/O space\n");
   2024 		}
   2025 
   2026 	}
   2027 
   2028 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2029 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2030 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2031 	if (sc->sc_type < WM_T_82542_2_1)
   2032 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2033 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2034 
   2035 	/* Power up chip */
   2036 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2037 	    && error != EOPNOTSUPP) {
   2038 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2039 		return;
   2040 	}
   2041 
   2042 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2043 	/*
    2044 	 * Don't use MSI-X if we can use only one queue, to save
    2045 	 * interrupt resources.
   2046 	 */
   2047 	if (sc->sc_nqueues > 1) {
   2048 		max_type = PCI_INTR_TYPE_MSIX;
   2049 		/*
    2050 		 * The 82583 advertises an MSI-X capability in the PCI
    2051 		 * configuration space but doesn't actually support it; at
    2052 		 * least the documentation says nothing about MSI-X.
   2053 		 */
   2054 		counts[PCI_INTR_TYPE_MSIX]
   2055 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2056 	} else {
   2057 		max_type = PCI_INTR_TYPE_MSI;
   2058 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2059 	}
   2060 
   2061 	/* Allocation settings */
   2062 	counts[PCI_INTR_TYPE_MSI] = 1;
   2063 	counts[PCI_INTR_TYPE_INTX] = 1;
   2064 	/* overridden by disable flags */
   2065 	if (wm_disable_msi != 0) {
   2066 		counts[PCI_INTR_TYPE_MSI] = 0;
   2067 		if (wm_disable_msix != 0) {
   2068 			max_type = PCI_INTR_TYPE_INTX;
   2069 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2070 		}
   2071 	} else if (wm_disable_msix != 0) {
   2072 		max_type = PCI_INTR_TYPE_MSI;
   2073 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2074 	}
   2075 
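         	/*
         	 * Interrupt allocation falls back step by step: if MSI-X setup
         	 * fails below, retry with MSI; if MSI setup fails, retry once
         	 * more with INTx.
         	 */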
   2076 alloc_retry:
   2077 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2078 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2079 		return;
   2080 	}
   2081 
   2082 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2083 		error = wm_setup_msix(sc);
   2084 		if (error) {
   2085 			pci_intr_release(pc, sc->sc_intrs,
   2086 			    counts[PCI_INTR_TYPE_MSIX]);
   2087 
   2088 			/* Setup for MSI: Disable MSI-X */
   2089 			max_type = PCI_INTR_TYPE_MSI;
   2090 			counts[PCI_INTR_TYPE_MSI] = 1;
   2091 			counts[PCI_INTR_TYPE_INTX] = 1;
   2092 			goto alloc_retry;
   2093 		}
   2094 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2095 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2096 		error = wm_setup_legacy(sc);
   2097 		if (error) {
   2098 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2099 			    counts[PCI_INTR_TYPE_MSI]);
   2100 
   2101 			/* The next try is for INTx: Disable MSI */
   2102 			max_type = PCI_INTR_TYPE_INTX;
   2103 			counts[PCI_INTR_TYPE_INTX] = 1;
   2104 			goto alloc_retry;
   2105 		}
   2106 	} else {
   2107 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2108 		error = wm_setup_legacy(sc);
   2109 		if (error) {
   2110 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2111 			    counts[PCI_INTR_TYPE_INTX]);
   2112 			return;
   2113 		}
   2114 	}
   2115 
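         	/* Create the workqueue used for deferred Tx/Rx queue processing. */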
   2116 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2117 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2118 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2119 	    WM_WORKQUEUE_FLAGS);
   2120 	if (error) {
   2121 		aprint_error_dev(sc->sc_dev,
   2122 		    "unable to create workqueue\n");
   2123 		goto out;
   2124 	}
   2125 
   2126 	/*
   2127 	 * Check the function ID (unit number of the chip).
   2128 	 */
   2129 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2130 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2131 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2132 	    || (sc->sc_type == WM_T_82580)
   2133 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2134 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2135 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2136 	else
   2137 		sc->sc_funcid = 0;
   2138 
   2139 	/*
   2140 	 * Determine a few things about the bus we're connected to.
   2141 	 */
   2142 	if (sc->sc_type < WM_T_82543) {
   2143 		/* We don't really know the bus characteristics here. */
   2144 		sc->sc_bus_speed = 33;
   2145 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2146 		/*
    2147 		 * CSA (Communication Streaming Architecture) is about as
    2148 		 * fast as a 32-bit 66MHz PCI bus.
   2149 		 */
   2150 		sc->sc_flags |= WM_F_CSA;
   2151 		sc->sc_bus_speed = 66;
   2152 		aprint_verbose_dev(sc->sc_dev,
   2153 		    "Communication Streaming Architecture\n");
   2154 		if (sc->sc_type == WM_T_82547) {
   2155 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2156 			callout_setfunc(&sc->sc_txfifo_ch,
   2157 			    wm_82547_txfifo_stall, sc);
   2158 			aprint_verbose_dev(sc->sc_dev,
   2159 			    "using 82547 Tx FIFO stall work-around\n");
   2160 		}
   2161 	} else if (sc->sc_type >= WM_T_82571) {
   2162 		sc->sc_flags |= WM_F_PCIE;
   2163 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2164 		    && (sc->sc_type != WM_T_ICH10)
   2165 		    && (sc->sc_type != WM_T_PCH)
   2166 		    && (sc->sc_type != WM_T_PCH2)
   2167 		    && (sc->sc_type != WM_T_PCH_LPT)
   2168 		    && (sc->sc_type != WM_T_PCH_SPT)
   2169 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2170 			/* ICH* and PCH* have no PCIe capability registers */
   2171 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2172 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2173 				NULL) == 0)
   2174 				aprint_error_dev(sc->sc_dev,
   2175 				    "unable to find PCIe capability\n");
   2176 		}
   2177 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2178 	} else {
   2179 		reg = CSR_READ(sc, WMREG_STATUS);
   2180 		if (reg & STATUS_BUS64)
   2181 			sc->sc_flags |= WM_F_BUS64;
   2182 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2183 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2184 
   2185 			sc->sc_flags |= WM_F_PCIX;
   2186 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2187 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2188 				aprint_error_dev(sc->sc_dev,
   2189 				    "unable to find PCIX capability\n");
   2190 			else if (sc->sc_type != WM_T_82545_3 &&
   2191 				 sc->sc_type != WM_T_82546_3) {
   2192 				/*
   2193 				 * Work around a problem caused by the BIOS
   2194 				 * setting the max memory read byte count
   2195 				 * incorrectly.
   2196 				 */
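         				/* Both fields encode the count as 512 << n. */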
   2197 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2198 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2199 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2200 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2201 
   2202 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2203 				    PCIX_CMD_BYTECNT_SHIFT;
   2204 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2205 				    PCIX_STATUS_MAXB_SHIFT;
   2206 				if (bytecnt > maxb) {
   2207 					aprint_verbose_dev(sc->sc_dev,
   2208 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2209 					    512 << bytecnt, 512 << maxb);
   2210 					pcix_cmd = (pcix_cmd &
   2211 					    ~PCIX_CMD_BYTECNT_MASK) |
   2212 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2213 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2214 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2215 					    pcix_cmd);
   2216 				}
   2217 			}
   2218 		}
   2219 		/*
   2220 		 * The quad port adapter is special; it has a PCIX-PCIX
   2221 		 * bridge on the board, and can run the secondary bus at
   2222 		 * a higher speed.
   2223 		 */
   2224 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2225 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2226 								      : 66;
   2227 		} else if (sc->sc_flags & WM_F_PCIX) {
   2228 			switch (reg & STATUS_PCIXSPD_MASK) {
   2229 			case STATUS_PCIXSPD_50_66:
   2230 				sc->sc_bus_speed = 66;
   2231 				break;
   2232 			case STATUS_PCIXSPD_66_100:
   2233 				sc->sc_bus_speed = 100;
   2234 				break;
   2235 			case STATUS_PCIXSPD_100_133:
   2236 				sc->sc_bus_speed = 133;
   2237 				break;
   2238 			default:
   2239 				aprint_error_dev(sc->sc_dev,
   2240 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2241 				    reg & STATUS_PCIXSPD_MASK);
   2242 				sc->sc_bus_speed = 66;
   2243 				break;
   2244 			}
   2245 		} else
   2246 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2247 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2248 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2249 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2250 	}
   2251 
   2252 	/* clear interesting stat counters */
   2253 	CSR_READ(sc, WMREG_COLC);
   2254 	CSR_READ(sc, WMREG_RXERRC);
   2255 
   2256 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2257 	    || (sc->sc_type >= WM_T_ICH8))
   2258 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2259 	if (sc->sc_type >= WM_T_ICH8)
   2260 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2261 
    2262 	/* Set up NVM access and the PHY/NVM semaphore functions */
   2263 	switch (sc->sc_type) {
   2264 	case WM_T_82542_2_0:
   2265 	case WM_T_82542_2_1:
   2266 	case WM_T_82543:
   2267 	case WM_T_82544:
   2268 		/* Microwire */
   2269 		sc->nvm.read = wm_nvm_read_uwire;
   2270 		sc->sc_nvm_wordsize = 64;
   2271 		sc->sc_nvm_addrbits = 6;
   2272 		break;
   2273 	case WM_T_82540:
   2274 	case WM_T_82545:
   2275 	case WM_T_82545_3:
   2276 	case WM_T_82546:
   2277 	case WM_T_82546_3:
   2278 		/* Microwire */
   2279 		sc->nvm.read = wm_nvm_read_uwire;
   2280 		reg = CSR_READ(sc, WMREG_EECD);
   2281 		if (reg & EECD_EE_SIZE) {
   2282 			sc->sc_nvm_wordsize = 256;
   2283 			sc->sc_nvm_addrbits = 8;
   2284 		} else {
   2285 			sc->sc_nvm_wordsize = 64;
   2286 			sc->sc_nvm_addrbits = 6;
   2287 		}
   2288 		sc->sc_flags |= WM_F_LOCK_EECD;
   2289 		sc->nvm.acquire = wm_get_eecd;
   2290 		sc->nvm.release = wm_put_eecd;
   2291 		break;
   2292 	case WM_T_82541:
   2293 	case WM_T_82541_2:
   2294 	case WM_T_82547:
   2295 	case WM_T_82547_2:
   2296 		reg = CSR_READ(sc, WMREG_EECD);
   2297 		/*
    2298 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2299 		 * 8254[17], so set the flags and functions before calling it.
   2300 		 */
   2301 		sc->sc_flags |= WM_F_LOCK_EECD;
   2302 		sc->nvm.acquire = wm_get_eecd;
   2303 		sc->nvm.release = wm_put_eecd;
   2304 		if (reg & EECD_EE_TYPE) {
   2305 			/* SPI */
   2306 			sc->nvm.read = wm_nvm_read_spi;
   2307 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2308 			wm_nvm_set_addrbits_size_eecd(sc);
   2309 		} else {
   2310 			/* Microwire */
   2311 			sc->nvm.read = wm_nvm_read_uwire;
   2312 			if ((reg & EECD_EE_ABITS) != 0) {
   2313 				sc->sc_nvm_wordsize = 256;
   2314 				sc->sc_nvm_addrbits = 8;
   2315 			} else {
   2316 				sc->sc_nvm_wordsize = 64;
   2317 				sc->sc_nvm_addrbits = 6;
   2318 			}
   2319 		}
   2320 		break;
   2321 	case WM_T_82571:
   2322 	case WM_T_82572:
   2323 		/* SPI */
   2324 		sc->nvm.read = wm_nvm_read_eerd;
    2325 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2326 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2327 		wm_nvm_set_addrbits_size_eecd(sc);
   2328 		sc->phy.acquire = wm_get_swsm_semaphore;
   2329 		sc->phy.release = wm_put_swsm_semaphore;
   2330 		sc->nvm.acquire = wm_get_nvm_82571;
   2331 		sc->nvm.release = wm_put_nvm_82571;
   2332 		break;
   2333 	case WM_T_82573:
   2334 	case WM_T_82574:
   2335 	case WM_T_82583:
   2336 		sc->nvm.read = wm_nvm_read_eerd;
    2337 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2338 		if (sc->sc_type == WM_T_82573) {
   2339 			sc->phy.acquire = wm_get_swsm_semaphore;
   2340 			sc->phy.release = wm_put_swsm_semaphore;
   2341 			sc->nvm.acquire = wm_get_nvm_82571;
   2342 			sc->nvm.release = wm_put_nvm_82571;
   2343 		} else {
   2344 			/* Both PHY and NVM use the same semaphore. */
   2345 			sc->phy.acquire = sc->nvm.acquire
   2346 			    = wm_get_swfwhw_semaphore;
   2347 			sc->phy.release = sc->nvm.release
   2348 			    = wm_put_swfwhw_semaphore;
   2349 		}
   2350 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2351 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2352 			sc->sc_nvm_wordsize = 2048;
   2353 		} else {
   2354 			/* SPI */
   2355 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2356 			wm_nvm_set_addrbits_size_eecd(sc);
   2357 		}
   2358 		break;
   2359 	case WM_T_82575:
   2360 	case WM_T_82576:
   2361 	case WM_T_82580:
   2362 	case WM_T_I350:
   2363 	case WM_T_I354:
   2364 	case WM_T_80003:
   2365 		/* SPI */
   2366 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2367 		wm_nvm_set_addrbits_size_eecd(sc);
   2368 		if ((sc->sc_type == WM_T_80003)
   2369 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2370 			sc->nvm.read = wm_nvm_read_eerd;
   2371 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2372 		} else {
   2373 			sc->nvm.read = wm_nvm_read_spi;
   2374 			sc->sc_flags |= WM_F_LOCK_EECD;
   2375 		}
   2376 		sc->phy.acquire = wm_get_phy_82575;
   2377 		sc->phy.release = wm_put_phy_82575;
   2378 		sc->nvm.acquire = wm_get_nvm_80003;
   2379 		sc->nvm.release = wm_put_nvm_80003;
   2380 		break;
   2381 	case WM_T_ICH8:
   2382 	case WM_T_ICH9:
   2383 	case WM_T_ICH10:
   2384 	case WM_T_PCH:
   2385 	case WM_T_PCH2:
   2386 	case WM_T_PCH_LPT:
   2387 		sc->nvm.read = wm_nvm_read_ich8;
   2388 		/* FLASH */
   2389 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2390 		sc->sc_nvm_wordsize = 2048;
   2391 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2392 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2393 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2394 			aprint_error_dev(sc->sc_dev,
   2395 			    "can't map FLASH registers\n");
   2396 			goto out;
   2397 		}
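         		/*
         		 * GFPREG gives the flash region's base and limit sector
         		 * numbers; derive the byte base address and the per-bank
         		 * size in 16-bit words (two banks are assumed).
         		 */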
   2398 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2399 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2400 		    ICH_FLASH_SECTOR_SIZE;
   2401 		sc->sc_ich8_flash_bank_size =
   2402 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2403 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2404 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2405 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2406 		sc->sc_flashreg_offset = 0;
   2407 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2408 		sc->phy.release = wm_put_swflag_ich8lan;
   2409 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2410 		sc->nvm.release = wm_put_nvm_ich8lan;
   2411 		break;
   2412 	case WM_T_PCH_SPT:
   2413 	case WM_T_PCH_CNP:
   2414 		sc->nvm.read = wm_nvm_read_spt;
   2415 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2416 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2417 		sc->sc_flasht = sc->sc_st;
   2418 		sc->sc_flashh = sc->sc_sh;
   2419 		sc->sc_ich8_flash_base = 0;
   2420 		sc->sc_nvm_wordsize =
   2421 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2422 		    * NVM_SIZE_MULTIPLIER;
    2423 		/* It is the size in bytes; we want it in words */
   2424 		sc->sc_nvm_wordsize /= 2;
   2425 		/* Assume 2 banks */
   2426 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2427 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2428 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2429 		sc->phy.release = wm_put_swflag_ich8lan;
   2430 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2431 		sc->nvm.release = wm_put_nvm_ich8lan;
   2432 		break;
   2433 	case WM_T_I210:
   2434 	case WM_T_I211:
    2435 		/* Allow one clear of the SW semaphore on I210 and newer */
   2436 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2437 		if (wm_nvm_flash_presence_i210(sc)) {
   2438 			sc->nvm.read = wm_nvm_read_eerd;
   2439 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2440 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2441 			wm_nvm_set_addrbits_size_eecd(sc);
   2442 		} else {
   2443 			sc->nvm.read = wm_nvm_read_invm;
   2444 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2445 			sc->sc_nvm_wordsize = INVM_SIZE;
   2446 		}
   2447 		sc->phy.acquire = wm_get_phy_82575;
   2448 		sc->phy.release = wm_put_phy_82575;
   2449 		sc->nvm.acquire = wm_get_nvm_80003;
   2450 		sc->nvm.release = wm_put_nvm_80003;
   2451 		break;
   2452 	default:
   2453 		break;
   2454 	}
   2455 
   2456 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2457 	switch (sc->sc_type) {
   2458 	case WM_T_82571:
   2459 	case WM_T_82572:
   2460 		reg = CSR_READ(sc, WMREG_SWSM2);
   2461 		if ((reg & SWSM2_LOCK) == 0) {
   2462 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2463 			force_clear_smbi = true;
   2464 		} else
   2465 			force_clear_smbi = false;
   2466 		break;
   2467 	case WM_T_82573:
   2468 	case WM_T_82574:
   2469 	case WM_T_82583:
   2470 		force_clear_smbi = true;
   2471 		break;
   2472 	default:
   2473 		force_clear_smbi = false;
   2474 		break;
   2475 	}
   2476 	if (force_clear_smbi) {
   2477 		reg = CSR_READ(sc, WMREG_SWSM);
   2478 		if ((reg & SWSM_SMBI) != 0)
   2479 			aprint_error_dev(sc->sc_dev,
   2480 			    "Please update the Bootagent\n");
   2481 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2482 	}
   2483 
   2484 	/*
    2485 	 * Defer printing the EEPROM type until after verifying the checksum.
   2486 	 * This allows the EEPROM type to be printed correctly in the case
   2487 	 * that no EEPROM is attached.
   2488 	 */
   2489 	/*
   2490 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2491 	 * this for later, so we can fail future reads from the EEPROM.
   2492 	 */
   2493 	if (wm_nvm_validate_checksum(sc)) {
   2494 		/*
    2495 		 * Validate a second time: some PCI-e parts fail the first
    2496 		 * check because the link is in a sleep state.
   2497 		 */
   2498 		if (wm_nvm_validate_checksum(sc))
   2499 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2500 	}
   2501 
   2502 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2503 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2504 	else {
   2505 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2506 		    sc->sc_nvm_wordsize);
   2507 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2508 			aprint_verbose("iNVM");
   2509 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2510 			aprint_verbose("FLASH(HW)");
   2511 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2512 			aprint_verbose("FLASH");
   2513 		else {
   2514 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2515 				eetype = "SPI";
   2516 			else
   2517 				eetype = "MicroWire";
   2518 			aprint_verbose("(%d address bits) %s EEPROM",
   2519 			    sc->sc_nvm_addrbits, eetype);
   2520 		}
   2521 	}
   2522 	wm_nvm_version(sc);
   2523 	aprint_verbose("\n");
   2524 
   2525 	/*
    2526 	 * XXX This is the first call to wm_gmii_setup_phytype(); the result
    2527 	 * might be incorrect.
   2528 	 */
   2529 	wm_gmii_setup_phytype(sc, 0, 0);
   2530 
   2531 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2532 	switch (sc->sc_type) {
   2533 	case WM_T_ICH8:
   2534 	case WM_T_ICH9:
   2535 	case WM_T_ICH10:
   2536 	case WM_T_PCH:
   2537 	case WM_T_PCH2:
   2538 	case WM_T_PCH_LPT:
   2539 	case WM_T_PCH_SPT:
   2540 	case WM_T_PCH_CNP:
   2541 		apme_mask = WUC_APME;
   2542 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2543 		if ((eeprom_data & apme_mask) != 0)
   2544 			sc->sc_flags |= WM_F_WOL;
   2545 		break;
   2546 	default:
   2547 		break;
   2548 	}
   2549 
   2550 	/* Reset the chip to a known state. */
   2551 	wm_reset(sc);
   2552 
   2553 	/*
   2554 	 * Check for I21[01] PLL workaround.
   2555 	 *
   2556 	 * Three cases:
   2557 	 * a) Chip is I211.
   2558 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2559 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2560 	 */
   2561 	if (sc->sc_type == WM_T_I211)
   2562 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2563 	if (sc->sc_type == WM_T_I210) {
   2564 		if (!wm_nvm_flash_presence_i210(sc))
   2565 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2566 		else if ((sc->sc_nvm_ver_major < 3)
   2567 		    || ((sc->sc_nvm_ver_major == 3)
   2568 			&& (sc->sc_nvm_ver_minor < 25))) {
   2569 			aprint_verbose_dev(sc->sc_dev,
   2570 			    "ROM image version %d.%d is older than 3.25\n",
   2571 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2572 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2573 		}
   2574 	}
   2575 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2576 		wm_pll_workaround_i210(sc);
   2577 
   2578 	wm_get_wakeup(sc);
   2579 
   2580 	/* Non-AMT based hardware can now take control from firmware */
   2581 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2582 		wm_get_hw_control(sc);
   2583 
   2584 	/*
    2585 	 * Read the Ethernet address from the EEPROM, unless it was already
    2586 	 * provided via device properties.
   2587 	 */
   2588 	ea = prop_dictionary_get(dict, "mac-address");
   2589 	if (ea != NULL) {
   2590 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2591 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2592 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2593 	} else {
   2594 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2595 			aprint_error_dev(sc->sc_dev,
   2596 			    "unable to read Ethernet address\n");
   2597 			goto out;
   2598 		}
   2599 	}
   2600 
   2601 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2602 	    ether_sprintf(enaddr));
   2603 
   2604 	/*
   2605 	 * Read the config info from the EEPROM, and set up various
   2606 	 * bits in the control registers based on their contents.
   2607 	 */
   2608 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2609 	if (pn != NULL) {
   2610 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2611 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2612 	} else {
   2613 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2614 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2615 			goto out;
   2616 		}
   2617 	}
   2618 
   2619 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2620 	if (pn != NULL) {
   2621 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2622 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2623 	} else {
   2624 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2625 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2626 			goto out;
   2627 		}
   2628 	}
   2629 
   2630 	/* check for WM_F_WOL */
   2631 	switch (sc->sc_type) {
   2632 	case WM_T_82542_2_0:
   2633 	case WM_T_82542_2_1:
   2634 	case WM_T_82543:
   2635 		/* dummy? */
   2636 		eeprom_data = 0;
   2637 		apme_mask = NVM_CFG3_APME;
   2638 		break;
   2639 	case WM_T_82544:
   2640 		apme_mask = NVM_CFG2_82544_APM_EN;
   2641 		eeprom_data = cfg2;
   2642 		break;
   2643 	case WM_T_82546:
   2644 	case WM_T_82546_3:
   2645 	case WM_T_82571:
   2646 	case WM_T_82572:
   2647 	case WM_T_82573:
   2648 	case WM_T_82574:
   2649 	case WM_T_82583:
   2650 	case WM_T_80003:
   2651 	case WM_T_82575:
   2652 	case WM_T_82576:
   2653 		apme_mask = NVM_CFG3_APME;
   2654 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2655 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2656 		break;
   2657 	case WM_T_82580:
   2658 	case WM_T_I350:
   2659 	case WM_T_I354:
   2660 	case WM_T_I210:
   2661 	case WM_T_I211:
   2662 		apme_mask = NVM_CFG3_APME;
   2663 		wm_nvm_read(sc,
   2664 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2665 		    1, &eeprom_data);
   2666 		break;
   2667 	case WM_T_ICH8:
   2668 	case WM_T_ICH9:
   2669 	case WM_T_ICH10:
   2670 	case WM_T_PCH:
   2671 	case WM_T_PCH2:
   2672 	case WM_T_PCH_LPT:
   2673 	case WM_T_PCH_SPT:
   2674 	case WM_T_PCH_CNP:
    2675 		/* Already checked before wm_reset() */
   2676 		apme_mask = eeprom_data = 0;
   2677 		break;
   2678 	default: /* XXX 82540 */
   2679 		apme_mask = NVM_CFG3_APME;
   2680 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2681 		break;
   2682 	}
   2683 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2684 	if ((eeprom_data & apme_mask) != 0)
   2685 		sc->sc_flags |= WM_F_WOL;
   2686 
   2687 	/*
    2688 	 * We have the EEPROM settings; now apply the special cases where
    2689 	 * the EEPROM may be wrong or the board won't support wake-on-LAN
    2690 	 * on a particular port.
   2691 	 */
   2692 	switch (sc->sc_pcidevid) {
   2693 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2694 		sc->sc_flags &= ~WM_F_WOL;
   2695 		break;
   2696 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2697 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2698 		/* Wake events only supported on port A for dual fiber
   2699 		 * regardless of eeprom setting */
   2700 		if (sc->sc_funcid == 1)
   2701 			sc->sc_flags &= ~WM_F_WOL;
   2702 		break;
   2703 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2704 		/* If quad port adapter, disable WoL on all but port A */
   2705 		if (sc->sc_funcid != 0)
   2706 			sc->sc_flags &= ~WM_F_WOL;
   2707 		break;
   2708 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2709 		/* Wake events only supported on port A for dual fiber
   2710 		 * regardless of eeprom setting */
   2711 		if (sc->sc_funcid == 1)
   2712 			sc->sc_flags &= ~WM_F_WOL;
   2713 		break;
   2714 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2715 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2716 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2717 		/* If quad port adapter, disable WoL on all but port A */
   2718 		if (sc->sc_funcid != 0)
   2719 			sc->sc_flags &= ~WM_F_WOL;
   2720 		break;
   2721 	}
   2722 
   2723 	if (sc->sc_type >= WM_T_82575) {
   2724 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2725 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2726 			    nvmword);
   2727 			if ((sc->sc_type == WM_T_82575) ||
   2728 			    (sc->sc_type == WM_T_82576)) {
   2729 				/* Check NVM for autonegotiation */
   2730 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2731 				    != 0)
   2732 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2733 			}
   2734 			if ((sc->sc_type == WM_T_82575) ||
   2735 			    (sc->sc_type == WM_T_I350)) {
   2736 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2737 					sc->sc_flags |= WM_F_MAS;
   2738 			}
   2739 		}
   2740 	}
   2741 
   2742 	/*
   2743 	 * XXX need special handling for some multiple port cards
    2744 	 * to disable a particular port.
   2745 	 */
   2746 
   2747 	if (sc->sc_type >= WM_T_82544) {
   2748 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2749 		if (pn != NULL) {
   2750 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2751 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2752 		} else {
   2753 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2754 				aprint_error_dev(sc->sc_dev,
   2755 				    "unable to read SWDPIN\n");
   2756 				goto out;
   2757 			}
   2758 		}
   2759 	}
   2760 
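         	/* NVM_CFG1_ILOS maps to CTRL_ILOS: invert the loss-of-signal input. */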
   2761 	if (cfg1 & NVM_CFG1_ILOS)
   2762 		sc->sc_ctrl |= CTRL_ILOS;
   2763 
    2764 	/*
    2765 	 * XXX
    2766 	 * This code isn't correct because pins 2 and 3 are located
    2767 	 * at different positions on newer chips. Check all datasheets.
    2768 	 *
    2769 	 * Until this is resolved, apply it only to chips up to the 82580.
    2770 	 */
   2771 	if (sc->sc_type <= WM_T_82580) {
   2772 		if (sc->sc_type >= WM_T_82544) {
   2773 			sc->sc_ctrl |=
   2774 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2775 			    CTRL_SWDPIO_SHIFT;
   2776 			sc->sc_ctrl |=
   2777 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2778 			    CTRL_SWDPINS_SHIFT;
   2779 		} else {
   2780 			sc->sc_ctrl |=
   2781 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2782 			    CTRL_SWDPIO_SHIFT;
   2783 		}
   2784 	}
   2785 
   2786 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2787 		wm_nvm_read(sc,
   2788 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2789 		    1, &nvmword);
   2790 		if (nvmword & NVM_CFG3_ILOS)
   2791 			sc->sc_ctrl |= CTRL_ILOS;
   2792 	}
   2793 
   2794 #if 0
   2795 	if (sc->sc_type >= WM_T_82544) {
   2796 		if (cfg1 & NVM_CFG1_IPS0)
   2797 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2798 		if (cfg1 & NVM_CFG1_IPS1)
   2799 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2800 		sc->sc_ctrl_ext |=
   2801 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2802 		    CTRL_EXT_SWDPIO_SHIFT;
   2803 		sc->sc_ctrl_ext |=
   2804 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2805 		    CTRL_EXT_SWDPINS_SHIFT;
   2806 	} else {
   2807 		sc->sc_ctrl_ext |=
   2808 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2809 		    CTRL_EXT_SWDPIO_SHIFT;
   2810 	}
   2811 #endif
   2812 
   2813 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2814 #if 0
   2815 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2816 #endif
   2817 
   2818 	if (sc->sc_type == WM_T_PCH) {
   2819 		uint16_t val;
   2820 
   2821 		/* Save the NVM K1 bit setting */
   2822 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2823 
   2824 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2825 			sc->sc_nvm_k1_enabled = 1;
   2826 		else
   2827 			sc->sc_nvm_k1_enabled = 0;
   2828 	}
   2829 
    2830 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2831 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2832 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2833 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2834 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2835 	    || sc->sc_type == WM_T_82573
   2836 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2837 		/* Copper only */
   2838 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2839 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2840 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2841 	    || (sc->sc_type == WM_T_I211)) {
   2842 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2843 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2844 		switch (link_mode) {
   2845 		case CTRL_EXT_LINK_MODE_1000KX:
   2846 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2847 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2848 			break;
   2849 		case CTRL_EXT_LINK_MODE_SGMII:
   2850 			if (wm_sgmii_uses_mdio(sc)) {
   2851 				aprint_normal_dev(sc->sc_dev,
   2852 				    "SGMII(MDIO)\n");
   2853 				sc->sc_flags |= WM_F_SGMII;
   2854 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2855 				break;
   2856 			}
   2857 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2858 			/*FALLTHROUGH*/
   2859 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2860 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2861 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2862 				if (link_mode
   2863 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2864 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2865 					sc->sc_flags |= WM_F_SGMII;
   2866 					aprint_verbose_dev(sc->sc_dev,
   2867 					    "SGMII\n");
   2868 				} else {
   2869 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2870 					aprint_verbose_dev(sc->sc_dev,
   2871 					    "SERDES\n");
   2872 				}
   2873 				break;
   2874 			}
   2875 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2876 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2877 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2878 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2879 				sc->sc_flags |= WM_F_SGMII;
   2880 			}
   2881 			/* Do not change link mode for 100BaseFX */
   2882 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2883 				break;
   2884 
   2885 			/* Change current link mode setting */
   2886 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2887 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2888 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2889 			else
   2890 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2891 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2892 			break;
   2893 		case CTRL_EXT_LINK_MODE_GMII:
   2894 		default:
   2895 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2896 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2897 			break;
   2898 		}
   2899 
    2901 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2902 			reg |= CTRL_EXT_I2C_ENA;
    2903 		else
    2904 			reg &= ~CTRL_EXT_I2C_ENA;
   2905 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2906 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2907 			if (!wm_sgmii_uses_mdio(sc))
   2908 				wm_gmii_setup_phytype(sc, 0, 0);
   2909 			wm_reset_mdicnfg_82580(sc);
   2910 		}
   2911 	} else if (sc->sc_type < WM_T_82543 ||
   2912 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2913 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2914 			aprint_error_dev(sc->sc_dev,
   2915 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2916 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2917 		}
   2918 	} else {
   2919 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2920 			aprint_error_dev(sc->sc_dev,
   2921 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2922 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2923 		}
   2924 	}
   2925 
   2926 	if (sc->sc_type >= WM_T_PCH2)
   2927 		sc->sc_flags |= WM_F_EEE;
   2928 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2929 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2930 		/* XXX: Need special handling for I354. (not yet) */
   2931 		if (sc->sc_type != WM_T_I354)
   2932 			sc->sc_flags |= WM_F_EEE;
   2933 	}
   2934 
   2935 	/*
   2936 	 * The I350 has a bug where it always strips the CRC whether
    2937 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   2938 	 */
   2939 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2940 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2941 		sc->sc_flags |= WM_F_CRC_STRIP;
   2942 
   2943 	/* Set device properties (macflags) */
   2944 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2945 
   2946 	if (sc->sc_flags != 0) {
   2947 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2948 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2949 	}
   2950 
   2951 #ifdef WM_MPSAFE
   2952 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2953 #else
   2954 	sc->sc_core_lock = NULL;
   2955 #endif
   2956 
   2957 	/* Initialize the media structures accordingly. */
   2958 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2959 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2960 	else
   2961 		wm_tbi_mediainit(sc); /* All others */
   2962 
   2963 	ifp = &sc->sc_ethercom.ec_if;
   2964 	xname = device_xname(sc->sc_dev);
   2965 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2966 	ifp->if_softc = sc;
   2967 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2968 #ifdef WM_MPSAFE
   2969 	ifp->if_extflags = IFEF_MPSAFE;
   2970 #endif
   2971 	ifp->if_ioctl = wm_ioctl;
   2972 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2973 		ifp->if_start = wm_nq_start;
    2974 		/*
    2975 		 * When there is only one CPU and the controller can use
    2976 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2977 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2978 		 * the other for link status changes.
    2979 		 * In this situation, wm_nq_transmit() is disadvantageous
    2980 		 * because of the wm_select_txqueue() and pcq(9) overhead.
    2981 		 */
   2982 		if (wm_is_using_multiqueue(sc))
   2983 			ifp->if_transmit = wm_nq_transmit;
   2984 	} else {
   2985 		ifp->if_start = wm_start;
    2986 		/*
    2987 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
    2988 		 */
   2989 		if (wm_is_using_multiqueue(sc))
   2990 			ifp->if_transmit = wm_transmit;
   2991 	}
    2992 	/* wm(4) does not use ifp->if_watchdog; wm_tick() acts as the watchdog. */
   2993 	ifp->if_init = wm_init;
   2994 	ifp->if_stop = wm_stop;
   2995 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2996 	IFQ_SET_READY(&ifp->if_snd);
   2997 
   2998 	/* Check for jumbo frame */
   2999 	switch (sc->sc_type) {
   3000 	case WM_T_82573:
   3001 		/* XXX limited to 9234 if ASPM is disabled */
   3002 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3003 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3004 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3005 		break;
   3006 	case WM_T_82571:
   3007 	case WM_T_82572:
   3008 	case WM_T_82574:
   3009 	case WM_T_82583:
   3010 	case WM_T_82575:
   3011 	case WM_T_82576:
   3012 	case WM_T_82580:
   3013 	case WM_T_I350:
   3014 	case WM_T_I354:
   3015 	case WM_T_I210:
   3016 	case WM_T_I211:
   3017 	case WM_T_80003:
   3018 	case WM_T_ICH9:
   3019 	case WM_T_ICH10:
   3020 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3021 	case WM_T_PCH_LPT:
   3022 	case WM_T_PCH_SPT:
   3023 	case WM_T_PCH_CNP:
   3024 		/* XXX limited to 9234 */
   3025 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3026 		break;
   3027 	case WM_T_PCH:
   3028 		/* XXX limited to 4096 */
   3029 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3030 		break;
   3031 	case WM_T_82542_2_0:
   3032 	case WM_T_82542_2_1:
   3033 	case WM_T_ICH8:
   3034 		/* No support for jumbo frame */
   3035 		break;
   3036 	default:
   3037 		/* ETHER_MAX_LEN_JUMBO */
   3038 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3039 		break;
   3040 	}
   3041 
    3042 	/* If we're an i82543 or greater, we can support VLANs. */
   3043 	if (sc->sc_type >= WM_T_82543) {
   3044 		sc->sc_ethercom.ec_capabilities |=
   3045 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3046 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3047 	}
   3048 
   3049 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3050 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3051 
   3052 	/*
    3053 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   3054 	 * on i82543 and later.
   3055 	 */
   3056 	if (sc->sc_type >= WM_T_82543) {
   3057 		ifp->if_capabilities |=
   3058 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3059 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3060 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3061 		    IFCAP_CSUM_TCPv6_Tx |
   3062 		    IFCAP_CSUM_UDPv6_Tx;
   3063 	}
   3064 
   3065 	/*
   3066 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3067 	 *
   3068 	 *	82541GI (8086:1076) ... no
   3069 	 *	82572EI (8086:10b9) ... yes
   3070 	 */
   3071 	if (sc->sc_type >= WM_T_82571) {
   3072 		ifp->if_capabilities |=
   3073 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3074 	}
   3075 
   3076 	/*
    3077 	 * If we're an i82544 or greater (except i82547), we can do
   3078 	 * TCP segmentation offload.
   3079 	 */
   3080 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3081 		ifp->if_capabilities |= IFCAP_TSOv4;
   3082 	}
   3083 
   3084 	if (sc->sc_type >= WM_T_82571) {
   3085 		ifp->if_capabilities |= IFCAP_TSOv6;
   3086 	}
   3087 
   3088 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3089 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3090 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3091 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3092 
   3093 	/* Attach the interface. */
   3094 	if_initialize(ifp);
   3095 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3096 	ether_ifattach(ifp, enaddr);
   3097 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3098 	if_register(ifp);
   3099 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3100 	    RND_FLAG_DEFAULT);
   3101 
   3102 #ifdef WM_EVENT_COUNTERS
   3103 	/* Attach event counters. */
   3104 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3105 	    NULL, xname, "linkintr");
   3106 
   3107 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3108 	    NULL, xname, "tx_xoff");
   3109 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3110 	    NULL, xname, "tx_xon");
   3111 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3112 	    NULL, xname, "rx_xoff");
   3113 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3114 	    NULL, xname, "rx_xon");
   3115 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3116 	    NULL, xname, "rx_macctl");
   3117 #endif /* WM_EVENT_COUNTERS */
   3118 
   3119 	sc->sc_txrx_use_workqueue = false;
   3120 
   3121 	if (wm_phy_need_linkdown_discard(sc))
   3122 		wm_set_linkdown_discard(sc);
   3123 
   3124 	wm_init_sysctls(sc);
   3125 
   3126 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3127 		pmf_class_network_register(self, ifp);
   3128 	else
   3129 		aprint_error_dev(self, "couldn't establish power handler\n");
   3130 
   3131 	sc->sc_flags |= WM_F_ATTACHED;
   3132 out:
   3133 	return;
   3134 }
   3135 
   3136 /* The detach function (ca_detach) */
   3137 static int
   3138 wm_detach(device_t self, int flags __unused)
   3139 {
   3140 	struct wm_softc *sc = device_private(self);
   3141 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3142 	int i;
   3143 
   3144 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3145 		return 0;
   3146 
   3147 	/* Stop the interface. Callouts are stopped in it. */
   3148 	wm_stop(ifp, 1);
   3149 
   3150 	pmf_device_deregister(self);
   3151 
   3152 	sysctl_teardown(&sc->sc_sysctllog);
   3153 
   3154 #ifdef WM_EVENT_COUNTERS
   3155 	evcnt_detach(&sc->sc_ev_linkintr);
   3156 
   3157 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3158 	evcnt_detach(&sc->sc_ev_tx_xon);
   3159 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3160 	evcnt_detach(&sc->sc_ev_rx_xon);
   3161 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3162 #endif /* WM_EVENT_COUNTERS */
   3163 
   3164 	rnd_detach_source(&sc->rnd_source);
   3165 
   3166 	/* Tell the firmware about the release */
   3167 	WM_CORE_LOCK(sc);
   3168 	wm_release_manageability(sc);
   3169 	wm_release_hw_control(sc);
   3170 	wm_enable_wakeup(sc);
   3171 	WM_CORE_UNLOCK(sc);
   3172 
   3173 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3174 
   3175 	ether_ifdetach(ifp);
   3176 	if_detach(ifp);
   3177 	if_percpuq_destroy(sc->sc_ipq);
   3178 
   3179 	/* Delete all remaining media. */
   3180 	ifmedia_fini(&sc->sc_mii.mii_media);
   3181 
   3182 	/* Unload RX dmamaps and free mbufs */
   3183 	for (i = 0; i < sc->sc_nqueues; i++) {
   3184 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3185 		mutex_enter(rxq->rxq_lock);
   3186 		wm_rxdrain(rxq);
   3187 		mutex_exit(rxq->rxq_lock);
   3188 	}
   3189 	/* Must unlock here */
   3190 
   3191 	/* Disestablish the interrupt handler */
   3192 	for (i = 0; i < sc->sc_nintrs; i++) {
   3193 		if (sc->sc_ihs[i] != NULL) {
   3194 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3195 			sc->sc_ihs[i] = NULL;
   3196 		}
   3197 	}
   3198 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3199 
    3200 	/* wm_stop() ensures that the workqueue is stopped. */
   3201 	workqueue_destroy(sc->sc_queue_wq);
   3202 
   3203 	for (i = 0; i < sc->sc_nqueues; i++)
   3204 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3205 
   3206 	wm_free_txrx_queues(sc);
   3207 
   3208 	/* Unmap the registers */
   3209 	if (sc->sc_ss) {
   3210 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3211 		sc->sc_ss = 0;
   3212 	}
   3213 	if (sc->sc_ios) {
   3214 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3215 		sc->sc_ios = 0;
   3216 	}
   3217 	if (sc->sc_flashs) {
   3218 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3219 		sc->sc_flashs = 0;
   3220 	}
   3221 
   3222 	if (sc->sc_core_lock)
   3223 		mutex_obj_free(sc->sc_core_lock);
   3224 	if (sc->sc_ich_phymtx)
   3225 		mutex_obj_free(sc->sc_ich_phymtx);
   3226 	if (sc->sc_ich_nvmmtx)
   3227 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3228 
   3229 	return 0;
   3230 }
   3231 
   3232 static bool
   3233 wm_suspend(device_t self, const pmf_qual_t *qual)
   3234 {
   3235 	struct wm_softc *sc = device_private(self);
   3236 
   3237 	wm_release_manageability(sc);
   3238 	wm_release_hw_control(sc);
   3239 	wm_enable_wakeup(sc);
   3240 
   3241 	return true;
   3242 }
   3243 
   3244 static bool
   3245 wm_resume(device_t self, const pmf_qual_t *qual)
   3246 {
   3247 	struct wm_softc *sc = device_private(self);
   3248 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3249 	pcireg_t reg;
   3250 	char buf[256];
   3251 
   3252 	reg = CSR_READ(sc, WMREG_WUS);
   3253 	if (reg != 0) {
   3254 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3255 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3256 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3257 	}
   3258 
   3259 	if (sc->sc_type >= WM_T_PCH2)
   3260 		wm_resume_workarounds_pchlan(sc);
   3261 	if ((ifp->if_flags & IFF_UP) == 0) {
   3262 		wm_reset(sc);
   3263 		/* Non-AMT based hardware can now take control from firmware */
   3264 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3265 			wm_get_hw_control(sc);
   3266 		wm_init_manageability(sc);
   3267 	} else {
   3268 		/*
   3269 		 * We called pmf_class_network_register(), so if_init() is
   3270 		 * automatically called when IFF_UP. wm_reset(),
   3271 		 * wm_get_hw_control() and wm_init_manageability() are called
   3272 		 * via wm_init().
   3273 		 */
   3274 	}
   3275 
   3276 	return true;
   3277 }
   3278 
   3279 /*
   3280  * wm_watchdog:		[ifnet interface function]
   3281  *
   3282  *	Watchdog timer handler.
   3283  */
   3284 static void
   3285 wm_watchdog(struct ifnet *ifp)
   3286 {
   3287 	int qid;
   3288 	struct wm_softc *sc = ifp->if_softc;
    3289 	uint16_t hang_queue = 0; /* Bitmap of hung queues; at most 16 (82576). */
   3290 
   3291 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3292 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3293 
   3294 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3295 	}
   3296 
    3297 	/* If any queue hung up, reset the interface. */
   3298 	if (hang_queue != 0) {
   3299 		(void)wm_init(ifp);
   3300 
    3301 		/*
    3302 		 * Some upper layer processing, e.g. ALTQ or a single CPU
    3303 		 * system, may still call ifp->if_start().
    3304 		 */
   3305 		/* Try to get more packets going. */
   3306 		ifp->if_start(ifp);
   3307 	}
   3308 }
   3309 
   3310 
   3311 static void
   3312 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3313 {
   3314 
   3315 	mutex_enter(txq->txq_lock);
   3316 	if (txq->txq_sending &&
   3317 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3318 		wm_watchdog_txq_locked(ifp, txq, hang);
   3319 
   3320 	mutex_exit(txq->txq_lock);
   3321 }
   3322 
   3323 static void
   3324 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3325     uint16_t *hang)
   3326 {
   3327 	struct wm_softc *sc = ifp->if_softc;
   3328 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3329 
   3330 	KASSERT(mutex_owned(txq->txq_lock));
   3331 
   3332 	/*
   3333 	 * Since we're using delayed interrupts, sweep up
   3334 	 * before we report an error.
   3335 	 */
   3336 	wm_txeof(txq, UINT_MAX);
   3337 
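         	/* If the queue is still busy after the sweep, mark it hung. */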
   3338 	if (txq->txq_sending)
   3339 		*hang |= __BIT(wmq->wmq_id);
   3340 
   3341 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3342 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3343 		    device_xname(sc->sc_dev));
   3344 	} else {
   3345 #ifdef WM_DEBUG
   3346 		int i, j;
   3347 		struct wm_txsoft *txs;
   3348 #endif
   3349 		log(LOG_ERR,
   3350 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3351 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3352 		    txq->txq_next);
   3353 		if_statinc(ifp, if_oerrors);
   3354 #ifdef WM_DEBUG
   3355 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3356 		    i = WM_NEXTTXS(txq, i)) {
   3357 			txs = &txq->txq_soft[i];
   3358 			printf("txs %d tx %d -> %d\n",
   3359 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3360 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3361 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3362 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3363 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3364 					printf("\t %#08x%08x\n",
   3365 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3366 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3367 				} else {
   3368 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3369 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3370 					    txq->txq_descs[j].wtx_addr.wa_low);
   3371 					printf("\t %#04x%02x%02x%08x\n",
   3372 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3373 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3374 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3375 					    txq->txq_descs[j].wtx_cmdlen);
   3376 				}
   3377 				if (j == txs->txs_lastdesc)
   3378 					break;
   3379 			}
   3380 		}
   3381 #endif
   3382 	}
   3383 }
   3384 
   3385 /*
   3386  * wm_tick:
   3387  *
   3388  *	One second timer, used to check link status, sweep up
   3389  *	completed transmit jobs, etc.
   3390  */
   3391 static void
   3392 wm_tick(void *arg)
   3393 {
   3394 	struct wm_softc *sc = arg;
   3395 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3396 #ifndef WM_MPSAFE
   3397 	int s = splnet();
   3398 #endif
   3399 
   3400 	WM_CORE_LOCK(sc);
   3401 
   3402 	if (sc->sc_core_stopping) {
   3403 		WM_CORE_UNLOCK(sc);
   3404 #ifndef WM_MPSAFE
   3405 		splx(s);
   3406 #endif
   3407 		return;
   3408 	}
   3409 
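         	/* The hardware statistics registers read below are clear-on-read. */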
   3410 	if (sc->sc_type >= WM_T_82542_2_1) {
   3411 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3412 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3413 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3414 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3415 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3416 	}
   3417 
   3418 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3419 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3420 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3421 	    + CSR_READ(sc, WMREG_CRCERRS)
   3422 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3423 	    + CSR_READ(sc, WMREG_SYMERRC)
   3424 	    + CSR_READ(sc, WMREG_RXERRC)
   3425 	    + CSR_READ(sc, WMREG_SEC)
   3426 	    + CSR_READ(sc, WMREG_CEXTERR)
   3427 	    + CSR_READ(sc, WMREG_RLEC));
    3428 	/*
    3429 	 * WMREG_RNBC is incremented when there are no available buffers
    3430 	 * in host memory. It does not count dropped packets, because the
    3431 	 * Ethernet controller can still receive packets in that case as
    3432 	 * long as there is space in the PHY's FIFO.
    3433 	 *
    3434 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
    3435 	 * instead of if_iqdrops.
    3436 	 */
   3437 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3438 	IF_STAT_PUTREF(ifp);
   3439 
   3440 	if (sc->sc_flags & WM_F_HAS_MII)
   3441 		mii_tick(&sc->sc_mii);
   3442 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3443 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3444 		wm_serdes_tick(sc);
   3445 	else
   3446 		wm_tbi_tick(sc);
   3447 
   3448 	WM_CORE_UNLOCK(sc);
   3449 
   3450 	wm_watchdog(ifp);
   3451 
   3452 	callout_schedule(&sc->sc_tick_ch, hz);
   3453 }
   3454 
   3455 static int
   3456 wm_ifflags_cb(struct ethercom *ec)
   3457 {
   3458 	struct ifnet *ifp = &ec->ec_if;
   3459 	struct wm_softc *sc = ifp->if_softc;
   3460 	u_short iffchange;
   3461 	int ecchange;
   3462 	bool needreset = false;
   3463 	int rc = 0;
   3464 
   3465 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3466 		device_xname(sc->sc_dev), __func__));
   3467 
   3468 	WM_CORE_LOCK(sc);
   3469 
    3470 	/*
    3471 	 * Check for if_flags.
    3472 	 * The main usage is to prevent a link down when opening bpf.
    3473 	 */
   3474 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3475 	sc->sc_if_flags = ifp->if_flags;
   3476 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3477 		needreset = true;
   3478 		goto ec;
   3479 	}
   3480 
   3481 	/* iff related updates */
   3482 	if ((iffchange & IFF_PROMISC) != 0)
   3483 		wm_set_filter(sc);
   3484 
   3485 	wm_set_vlan(sc);
   3486 
   3487 ec:
   3488 	/* Check for ec_capenable. */
   3489 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3490 	sc->sc_ec_capenable = ec->ec_capenable;
   3491 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3492 		needreset = true;
   3493 		goto out;
   3494 	}
   3495 
   3496 	/* ec related updates */
   3497 	wm_set_eee(sc);
   3498 
   3499 out:
   3500 	if (needreset)
   3501 		rc = ENETRESET;
   3502 	WM_CORE_UNLOCK(sc);
   3503 
   3504 	return rc;
   3505 }
   3506 
   3507 static bool
   3508 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3509 {
   3510 
   3511 	switch (sc->sc_phytype) {
   3512 	case WMPHY_82577: /* ihphy */
   3513 	case WMPHY_82578: /* atphy */
   3514 	case WMPHY_82579: /* ihphy */
   3515 	case WMPHY_I217: /* ihphy */
   3516 	case WMPHY_82580: /* ihphy */
   3517 	case WMPHY_I350: /* ihphy */
   3518 		return true;
   3519 	default:
   3520 		return false;
   3521 	}
   3522 }
   3523 
   3524 static void
   3525 wm_set_linkdown_discard(struct wm_softc *sc)
   3526 {
   3527 
   3528 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3529 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3530 
   3531 		mutex_enter(txq->txq_lock);
   3532 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3533 		mutex_exit(txq->txq_lock);
   3534 	}
   3535 }
   3536 
   3537 static void
   3538 wm_clear_linkdown_discard(struct wm_softc *sc)
   3539 {
   3540 
   3541 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3542 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3543 
   3544 		mutex_enter(txq->txq_lock);
   3545 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3546 		mutex_exit(txq->txq_lock);
   3547 	}
   3548 }
   3549 
   3550 /*
   3551  * wm_ioctl:		[ifnet interface function]
   3552  *
   3553  *	Handle control requests from the operator.
   3554  */
   3555 static int
   3556 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3557 {
   3558 	struct wm_softc *sc = ifp->if_softc;
   3559 	struct ifreq *ifr = (struct ifreq *)data;
   3560 	struct ifaddr *ifa = (struct ifaddr *)data;
   3561 	struct sockaddr_dl *sdl;
   3562 	int s, error;
   3563 
   3564 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3565 		device_xname(sc->sc_dev), __func__));
   3566 
   3567 #ifndef WM_MPSAFE
   3568 	s = splnet();
   3569 #endif
   3570 	switch (cmd) {
   3571 	case SIOCSIFMEDIA:
   3572 		WM_CORE_LOCK(sc);
   3573 		/* Flow control requires full-duplex mode. */
   3574 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3575 		    (ifr->ifr_media & IFM_FDX) == 0)
   3576 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3577 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3578 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3579 				/* We can do both TXPAUSE and RXPAUSE. */
   3580 				ifr->ifr_media |=
   3581 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3582 			}
   3583 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3584 		}
   3585 		WM_CORE_UNLOCK(sc);
   3586 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3587 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3588 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
   3589 				wm_set_linkdown_discard(sc);
   3590 			else
   3591 				wm_clear_linkdown_discard(sc);
   3592 		}
   3593 		break;
   3594 	case SIOCINITIFADDR:
   3595 		WM_CORE_LOCK(sc);
   3596 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3597 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3598 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3599 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3600 			/* Unicast address is the first multicast entry */
   3601 			wm_set_filter(sc);
   3602 			error = 0;
   3603 			WM_CORE_UNLOCK(sc);
   3604 			break;
   3605 		}
   3606 		WM_CORE_UNLOCK(sc);
    3607 		if (((ifp->if_flags & IFF_UP) == 0) &&
         		    wm_phy_need_linkdown_discard(sc))
   3608 			wm_clear_linkdown_discard(sc);
   3609 		/*FALLTHROUGH*/
   3610 	default:
   3611 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
    3612 			if (((ifp->if_flags & IFF_UP) == 0) &&
         			    ((ifr->ifr_flags & IFF_UP) != 0)) {
   3613 				wm_clear_linkdown_discard(sc);
    3614 			} else if (((ifp->if_flags & IFF_UP) != 0) &&
         			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   3615 				wm_set_linkdown_discard(sc);
   3616 			}
   3617 		}
   3618 #ifdef WM_MPSAFE
   3619 		s = splnet();
   3620 #endif
   3621 		/* It may call wm_start, so unlock here */
   3622 		error = ether_ioctl(ifp, cmd, data);
   3623 #ifdef WM_MPSAFE
   3624 		splx(s);
   3625 #endif
   3626 		if (error != ENETRESET)
   3627 			break;
   3628 
   3629 		error = 0;
   3630 
   3631 		if (cmd == SIOCSIFCAP)
   3632 			error = (*ifp->if_init)(ifp);
   3633 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3634 			;
   3635 		else if (ifp->if_flags & IFF_RUNNING) {
   3636 			/*
   3637 			 * Multicast list has changed; set the hardware filter
   3638 			 * accordingly.
   3639 			 */
   3640 			WM_CORE_LOCK(sc);
   3641 			wm_set_filter(sc);
   3642 			WM_CORE_UNLOCK(sc);
   3643 		}
   3644 		break;
   3645 	}
   3646 
   3647 #ifndef WM_MPSAFE
   3648 	splx(s);
   3649 #endif
   3650 	return error;
   3651 }
   3652 
   3653 /* MAC address related */
   3654 
   3655 /*
    3656  * Get the offset of the MAC address and return it.
    3657  * On error, offset 0 is used.
   3658  */
   3659 static uint16_t
   3660 wm_check_alt_mac_addr(struct wm_softc *sc)
   3661 {
   3662 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3663 	uint16_t offset = NVM_OFF_MACADDR;
   3664 
   3665 	/* Try to read alternative MAC address pointer */
   3666 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3667 		return 0;
   3668 
    3669 	/* Check whether the pointer is valid. */
   3670 	if ((offset == 0x0000) || (offset == 0xffff))
   3671 		return 0;
   3672 
   3673 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3674 	/*
    3675 	 * Check whether the alternative MAC address is valid.
    3676 	 * Some cards have a non-0xffff pointer but don't actually
    3677 	 * use an alternative MAC address.
    3678 	 *
    3679 	 * A valid unicast address must have the broadcast bit clear.
    3680 	 */
   3681 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3682 		if (((myea[0] & 0xff) & 0x01) == 0)
   3683 			return offset; /* Found */
   3684 
   3685 	/* Not found */
   3686 	return 0;
   3687 }
   3688 
   3689 static int
   3690 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3691 {
   3692 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3693 	uint16_t offset = NVM_OFF_MACADDR;
   3694 	int do_invert = 0;
   3695 
   3696 	switch (sc->sc_type) {
   3697 	case WM_T_82580:
   3698 	case WM_T_I350:
   3699 	case WM_T_I354:
   3700 		/* EEPROM Top Level Partitioning */
   3701 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3702 		break;
   3703 	case WM_T_82571:
   3704 	case WM_T_82575:
   3705 	case WM_T_82576:
   3706 	case WM_T_80003:
   3707 	case WM_T_I210:
   3708 	case WM_T_I211:
   3709 		offset = wm_check_alt_mac_addr(sc);
   3710 		if (offset == 0)
   3711 			if ((sc->sc_funcid & 0x01) == 1)
   3712 				do_invert = 1;
   3713 		break;
   3714 	default:
   3715 		if ((sc->sc_funcid & 0x01) == 1)
   3716 			do_invert = 1;
   3717 		break;
   3718 	}
   3719 
   3720 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3721 		goto bad;
   3722 
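         	/* Each 16-bit NVM word holds two address octets, low byte first. */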
   3723 	enaddr[0] = myea[0] & 0xff;
   3724 	enaddr[1] = myea[0] >> 8;
   3725 	enaddr[2] = myea[1] & 0xff;
   3726 	enaddr[3] = myea[1] >> 8;
   3727 	enaddr[4] = myea[2] & 0xff;
   3728 	enaddr[5] = myea[2] >> 8;
   3729 
   3730 	/*
   3731 	 * Toggle the LSB of the MAC address on the second port
   3732 	 * of some dual port cards.
   3733 	 */
   3734 	if (do_invert != 0)
   3735 		enaddr[5] ^= 1;
   3736 
   3737 	return 0;
   3738 
   3739  bad:
   3740 	return -1;
   3741 }
   3742 
   3743 /*
   3744  * wm_set_ral:
   3745  *
    3746  *	Set an entry in the receive address list.
   3747  */
   3748 static void
   3749 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3750 {
   3751 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3752 	uint32_t wlock_mac;
   3753 	int rv;
   3754 
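         	/*
         	 * Pack the station address into the RAL (low 32 bits) and
         	 * RAH (high 16 bits) layout; RAL_AV marks the entry valid.
         	 */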
   3755 	if (enaddr != NULL) {
   3756 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3757 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3758 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3759 		ral_hi |= RAL_AV;
   3760 	} else {
   3761 		ral_lo = 0;
   3762 		ral_hi = 0;
   3763 	}
   3764 
   3765 	switch (sc->sc_type) {
   3766 	case WM_T_82542_2_0:
   3767 	case WM_T_82542_2_1:
   3768 	case WM_T_82543:
   3769 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3770 		CSR_WRITE_FLUSH(sc);
   3771 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3772 		CSR_WRITE_FLUSH(sc);
   3773 		break;
   3774 	case WM_T_PCH2:
   3775 	case WM_T_PCH_LPT:
   3776 	case WM_T_PCH_SPT:
   3777 	case WM_T_PCH_CNP:
   3778 		if (idx == 0) {
   3779 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3780 			CSR_WRITE_FLUSH(sc);
   3781 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3782 			CSR_WRITE_FLUSH(sc);
   3783 			return;
   3784 		}
   3785 		if (sc->sc_type != WM_T_PCH2) {
   3786 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3787 			    FWSM_WLOCK_MAC);
   3788 			addrl = WMREG_SHRAL(idx - 1);
   3789 			addrh = WMREG_SHRAH(idx - 1);
   3790 		} else {
   3791 			wlock_mac = 0;
   3792 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3793 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3794 		}
   3795 
   3796 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3797 			rv = wm_get_swflag_ich8lan(sc);
   3798 			if (rv != 0)
   3799 				return;
   3800 			CSR_WRITE(sc, addrl, ral_lo);
   3801 			CSR_WRITE_FLUSH(sc);
   3802 			CSR_WRITE(sc, addrh, ral_hi);
   3803 			CSR_WRITE_FLUSH(sc);
   3804 			wm_put_swflag_ich8lan(sc);
   3805 		}
   3806 
   3807 		break;
   3808 	default:
   3809 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3810 		CSR_WRITE_FLUSH(sc);
   3811 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3812 		CSR_WRITE_FLUSH(sc);
   3813 		break;
   3814 	}
   3815 }
   3816 
   3817 /*
   3818  * wm_mchash:
   3819  *
   3820  *	Compute the hash of the multicast address for the 4096-bit
   3821  *	multicast filter.
   3822  */
   3823 static uint32_t
   3824 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3825 {
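         	/*
         	 * The hash is built from bits of the last two octets of the
         	 * address; sc_mchash_type selects which bit offsets are used
         	 * via the shift tables below.
         	 */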
   3826 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3827 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3828 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3829 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3830 	uint32_t hash;
   3831 
   3832 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3833 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3834 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3835 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3836 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3837 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3838 		return (hash & 0x3ff);
   3839 	}
   3840 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3841 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3842 
   3843 	return (hash & 0xfff);
   3844 }
   3845 
    3846 /*
    3847  * wm_rar_count:
    3848  *	Return the number of receive address (RAL/RAR) entries.
    3849  */
   3850 static int
   3851 wm_rar_count(struct wm_softc *sc)
   3852 {
   3853 	int size;
   3854 
   3855 	switch (sc->sc_type) {
   3856 	case WM_T_ICH8:
    3857 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3858 		break;
   3859 	case WM_T_ICH9:
   3860 	case WM_T_ICH10:
   3861 	case WM_T_PCH:
   3862 		size = WM_RAL_TABSIZE_ICH8;
   3863 		break;
   3864 	case WM_T_PCH2:
   3865 		size = WM_RAL_TABSIZE_PCH2;
   3866 		break;
   3867 	case WM_T_PCH_LPT:
   3868 	case WM_T_PCH_SPT:
   3869 	case WM_T_PCH_CNP:
   3870 		size = WM_RAL_TABSIZE_PCH_LPT;
   3871 		break;
   3872 	case WM_T_82575:
   3873 	case WM_T_I210:
   3874 	case WM_T_I211:
   3875 		size = WM_RAL_TABSIZE_82575;
   3876 		break;
   3877 	case WM_T_82576:
   3878 	case WM_T_82580:
   3879 		size = WM_RAL_TABSIZE_82576;
   3880 		break;
   3881 	case WM_T_I350:
   3882 	case WM_T_I354:
   3883 		size = WM_RAL_TABSIZE_I350;
   3884 		break;
   3885 	default:
   3886 		size = WM_RAL_TABSIZE;
   3887 	}
   3888 
   3889 	return size;
   3890 }
   3891 
   3892 /*
   3893  * wm_set_filter:
   3894  *
   3895  *	Set up the receive filter.
   3896  */
   3897 static void
   3898 wm_set_filter(struct wm_softc *sc)
   3899 {
   3900 	struct ethercom *ec = &sc->sc_ethercom;
   3901 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3902 	struct ether_multi *enm;
   3903 	struct ether_multistep step;
   3904 	bus_addr_t mta_reg;
   3905 	uint32_t hash, reg, bit;
   3906 	int i, size, ralmax, rv;
   3907 
   3908 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3909 		device_xname(sc->sc_dev), __func__));
   3910 
   3911 	if (sc->sc_type >= WM_T_82544)
   3912 		mta_reg = WMREG_CORDOVA_MTA;
   3913 	else
   3914 		mta_reg = WMREG_MTA;
   3915 
   3916 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3917 
   3918 	if (ifp->if_flags & IFF_BROADCAST)
   3919 		sc->sc_rctl |= RCTL_BAM;
   3920 	if (ifp->if_flags & IFF_PROMISC) {
   3921 		sc->sc_rctl |= RCTL_UPE;
   3922 		ETHER_LOCK(ec);
   3923 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3924 		ETHER_UNLOCK(ec);
   3925 		goto allmulti;
   3926 	}
   3927 
   3928 	/*
   3929 	 * Set the station address in the first RAL slot, and
   3930 	 * clear the remaining slots.
   3931 	 */
   3932 	size = wm_rar_count(sc);
   3933 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3934 
   3935 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3936 	    || (sc->sc_type == WM_T_PCH_CNP)) {
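         		/*
         		 * FWSM_WLOCK_MAC limits how many receive address
         		 * entries the driver may program; 0 means no limit.
         		 */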
   3937 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3938 		switch (i) {
   3939 		case 0:
   3940 			/* We can use all entries */
   3941 			ralmax = size;
   3942 			break;
   3943 		case 1:
   3944 			/* Only RAR[0] */
   3945 			ralmax = 1;
   3946 			break;
   3947 		default:
   3948 			/* Available SHRA + RAR[0] */
   3949 			ralmax = i + 1;
   3950 		}
   3951 	} else
   3952 		ralmax = size;
   3953 	for (i = 1; i < size; i++) {
   3954 		if (i < ralmax)
   3955 			wm_set_ral(sc, NULL, i);
   3956 	}
   3957 
   3958 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3959 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3960 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3961 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3962 		size = WM_ICH8_MC_TABSIZE;
   3963 	else
   3964 		size = WM_MC_TABSIZE;
   3965 	/* Clear out the multicast table. */
   3966 	for (i = 0; i < size; i++) {
   3967 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3968 		CSR_WRITE_FLUSH(sc);
   3969 	}
   3970 
   3971 	ETHER_LOCK(ec);
   3972 	ETHER_FIRST_MULTI(step, ec, enm);
   3973 	while (enm != NULL) {
   3974 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3975 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3976 			ETHER_UNLOCK(ec);
   3977 			/*
   3978 			 * We must listen to a range of multicast addresses.
   3979 			 * For now, just accept all multicasts, rather than
   3980 			 * trying to set only those filter bits needed to match
   3981 			 * the range.  (At this time, the only use of address
   3982 			 * ranges is for IP multicast routing, for which the
   3983 			 * range is big enough to require all bits set.)
   3984 			 */
   3985 			goto allmulti;
   3986 		}
   3987 
   3988 		hash = wm_mchash(sc, enm->enm_addrlo);
   3989 
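         		/*
         		 * The upper hash bits select one of the 32-bit MTA
         		 * registers (32 on ICH/PCH, 128 elsewhere); the low
         		 * 5 bits select the bit within that register.
         		 */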
   3990 		reg = (hash >> 5);
   3991 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3992 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3993 		    || (sc->sc_type == WM_T_PCH2)
   3994 		    || (sc->sc_type == WM_T_PCH_LPT)
   3995 		    || (sc->sc_type == WM_T_PCH_SPT)
   3996 		    || (sc->sc_type == WM_T_PCH_CNP))
   3997 			reg &= 0x1f;
   3998 		else
   3999 			reg &= 0x7f;
   4000 		bit = hash & 0x1f;
   4001 
   4002 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4003 		hash |= 1U << bit;
   4004 
   4005 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4006 			/*
    4007 			 * 82544 Errata 9: Certain registers cannot be written
   4008 			 * with particular alignments in PCI-X bus operation
   4009 			 * (FCAH, MTA and VFTA).
   4010 			 */
   4011 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4012 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4013 			CSR_WRITE_FLUSH(sc);
   4014 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4015 			CSR_WRITE_FLUSH(sc);
   4016 		} else {
   4017 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4018 			CSR_WRITE_FLUSH(sc);
   4019 		}
   4020 
   4021 		ETHER_NEXT_MULTI(step, enm);
   4022 	}
   4023 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4024 	ETHER_UNLOCK(ec);
   4025 
   4026 	goto setit;
   4027 
   4028  allmulti:
   4029 	sc->sc_rctl |= RCTL_MPE;
   4030 
   4031  setit:
   4032 	if (sc->sc_type >= WM_T_PCH2) {
   4033 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4034 		    && (ifp->if_mtu > ETHERMTU))
   4035 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4036 		else
   4037 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4038 		if (rv != 0)
   4039 			device_printf(sc->sc_dev,
   4040 			    "Failed to do workaround for jumbo frame.\n");
   4041 	}
   4042 
   4043 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4044 }
   4045 
   4046 /* Reset and init related */
   4047 
   4048 static void
   4049 wm_set_vlan(struct wm_softc *sc)
   4050 {
   4051 
   4052 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4053 		device_xname(sc->sc_dev), __func__));
   4054 
   4055 	/* Deal with VLAN enables. */
   4056 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4057 		sc->sc_ctrl |= CTRL_VME;
   4058 	else
   4059 		sc->sc_ctrl &= ~CTRL_VME;
   4060 
   4061 	/* Write the control registers. */
   4062 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4063 }
   4064 
   4065 static void
   4066 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4067 {
   4068 	uint32_t gcr;
   4069 	pcireg_t ctrl2;
   4070 
   4071 	gcr = CSR_READ(sc, WMREG_GCR);
   4072 
   4073 	/* Only take action if timeout value is defaulted to 0 */
   4074 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4075 		goto out;
   4076 
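         	/*
         	 * Without PCIe capability version 2, set the timeout in GCR;
         	 * otherwise use the standard Device Control 2 register.
         	 */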
   4077 	if ((gcr & GCR_CAP_VER2) == 0) {
   4078 		gcr |= GCR_CMPL_TMOUT_10MS;
   4079 		goto out;
   4080 	}
   4081 
   4082 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4083 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4084 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4085 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4086 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4087 
   4088 out:
   4089 	/* Disable completion timeout resend */
   4090 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4091 
   4092 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4093 }
   4094 
   4095 void
   4096 wm_get_auto_rd_done(struct wm_softc *sc)
   4097 {
   4098 	int i;
   4099 
    4100 	/* Wait for the EEPROM to reload */
   4101 	switch (sc->sc_type) {
   4102 	case WM_T_82571:
   4103 	case WM_T_82572:
   4104 	case WM_T_82573:
   4105 	case WM_T_82574:
   4106 	case WM_T_82583:
   4107 	case WM_T_82575:
   4108 	case WM_T_82576:
   4109 	case WM_T_82580:
   4110 	case WM_T_I350:
   4111 	case WM_T_I354:
   4112 	case WM_T_I210:
   4113 	case WM_T_I211:
   4114 	case WM_T_80003:
   4115 	case WM_T_ICH8:
   4116 	case WM_T_ICH9:
   4117 		for (i = 0; i < 10; i++) {
   4118 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4119 				break;
   4120 			delay(1000);
   4121 		}
   4122 		if (i == 10) {
   4123 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4124 			    "complete\n", device_xname(sc->sc_dev));
   4125 		}
   4126 		break;
   4127 	default:
   4128 		break;
   4129 	}
   4130 }
   4131 
   4132 void
   4133 wm_lan_init_done(struct wm_softc *sc)
   4134 {
   4135 	uint32_t reg = 0;
   4136 	int i;
   4137 
   4138 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4139 		device_xname(sc->sc_dev), __func__));
   4140 
    4141 	/* Wait for the EEPROM to reload */
   4142 	switch (sc->sc_type) {
   4143 	case WM_T_ICH10:
   4144 	case WM_T_PCH:
   4145 	case WM_T_PCH2:
   4146 	case WM_T_PCH_LPT:
   4147 	case WM_T_PCH_SPT:
   4148 	case WM_T_PCH_CNP:
   4149 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4150 			reg = CSR_READ(sc, WMREG_STATUS);
   4151 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4152 				break;
   4153 			delay(100);
   4154 		}
   4155 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4156 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4157 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4158 		}
   4159 		break;
   4160 	default:
   4161 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4162 		    __func__);
   4163 		break;
   4164 	}
   4165 
   4166 	reg &= ~STATUS_LAN_INIT_DONE;
   4167 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4168 }
   4169 
   4170 void
   4171 wm_get_cfg_done(struct wm_softc *sc)
   4172 {
   4173 	int mask;
   4174 	uint32_t reg;
   4175 	int i;
   4176 
   4177 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4178 		device_xname(sc->sc_dev), __func__));
   4179 
    4180 	/* Wait for the EEPROM to reload */
   4181 	switch (sc->sc_type) {
   4182 	case WM_T_82542_2_0:
   4183 	case WM_T_82542_2_1:
   4184 		/* null */
   4185 		break;
   4186 	case WM_T_82543:
   4187 	case WM_T_82544:
   4188 	case WM_T_82540:
   4189 	case WM_T_82545:
   4190 	case WM_T_82545_3:
   4191 	case WM_T_82546:
   4192 	case WM_T_82546_3:
   4193 	case WM_T_82541:
   4194 	case WM_T_82541_2:
   4195 	case WM_T_82547:
   4196 	case WM_T_82547_2:
   4197 	case WM_T_82573:
   4198 	case WM_T_82574:
   4199 	case WM_T_82583:
   4200 		/* generic */
   4201 		delay(10*1000);
   4202 		break;
   4203 	case WM_T_80003:
   4204 	case WM_T_82571:
   4205 	case WM_T_82572:
   4206 	case WM_T_82575:
   4207 	case WM_T_82576:
   4208 	case WM_T_82580:
   4209 	case WM_T_I350:
   4210 	case WM_T_I354:
   4211 	case WM_T_I210:
   4212 	case WM_T_I211:
   4213 		if (sc->sc_type == WM_T_82571) {
    4214 			/* On the 82571, both ports share CFGDONE_0 */
   4215 			mask = EEMNGCTL_CFGDONE_0;
   4216 		} else
   4217 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4218 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4219 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4220 				break;
   4221 			delay(1000);
   4222 		}
   4223 		if (i >= WM_PHY_CFG_TIMEOUT)
   4224 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4225 				device_xname(sc->sc_dev), __func__));
   4226 		break;
   4227 	case WM_T_ICH8:
   4228 	case WM_T_ICH9:
   4229 	case WM_T_ICH10:
   4230 	case WM_T_PCH:
   4231 	case WM_T_PCH2:
   4232 	case WM_T_PCH_LPT:
   4233 	case WM_T_PCH_SPT:
   4234 	case WM_T_PCH_CNP:
   4235 		delay(10*1000);
   4236 		if (sc->sc_type >= WM_T_ICH10)
   4237 			wm_lan_init_done(sc);
   4238 		else
   4239 			wm_get_auto_rd_done(sc);
   4240 
   4241 		/* Clear PHY Reset Asserted bit */
   4242 		reg = CSR_READ(sc, WMREG_STATUS);
   4243 		if ((reg & STATUS_PHYRA) != 0)
   4244 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4245 		break;
   4246 	default:
   4247 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4248 		    __func__);
   4249 		break;
   4250 	}
   4251 }
   4252 
   4253 int
   4254 wm_phy_post_reset(struct wm_softc *sc)
   4255 {
   4256 	device_t dev = sc->sc_dev;
   4257 	uint16_t reg;
   4258 	int rv = 0;
   4259 
   4260 	/* This function is only for ICH8 and newer. */
   4261 	if (sc->sc_type < WM_T_ICH8)
   4262 		return 0;
   4263 
   4264 	if (wm_phy_resetisblocked(sc)) {
   4265 		/* XXX */
   4266 		device_printf(dev, "PHY is blocked\n");
   4267 		return -1;
   4268 	}
   4269 
   4270 	/* Allow time for h/w to get to quiescent state after reset */
   4271 	delay(10*1000);
   4272 
   4273 	/* Perform any necessary post-reset workarounds */
   4274 	if (sc->sc_type == WM_T_PCH)
   4275 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4276 	else if (sc->sc_type == WM_T_PCH2)
   4277 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4278 	if (rv != 0)
   4279 		return rv;
   4280 
    4281 	/* Clear the host wakeup bit after LCD reset */
   4282 	if (sc->sc_type >= WM_T_PCH) {
   4283 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4284 		reg &= ~BM_WUC_HOST_WU_BIT;
   4285 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4286 	}
   4287 
   4288 	/* Configure the LCD with the extended configuration region in NVM */
   4289 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4290 		return rv;
   4291 
   4292 	/* Configure the LCD with the OEM bits in NVM */
   4293 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4294 
   4295 	if (sc->sc_type == WM_T_PCH2) {
   4296 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4297 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4298 			delay(10 * 1000);
   4299 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4300 		}
   4301 		/* Set EEE LPI Update Timer to 200usec */
   4302 		rv = sc->phy.acquire(sc);
   4303 		if (rv)
   4304 			return rv;
   4305 		rv = wm_write_emi_reg_locked(dev,
   4306 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4307 		sc->phy.release(sc);
   4308 	}
   4309 
   4310 	return rv;
   4311 }
   4312 
   4313 /* Only for PCH and newer */
   4314 static int
   4315 wm_write_smbus_addr(struct wm_softc *sc)
   4316 {
   4317 	uint32_t strap, freq;
   4318 	uint16_t phy_data;
   4319 	int rv;
   4320 
   4321 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4322 		device_xname(sc->sc_dev), __func__));
   4323 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4324 
   4325 	strap = CSR_READ(sc, WMREG_STRAP);
   4326 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4327 
   4328 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4329 	if (rv != 0)
   4330 		return -1;
   4331 
   4332 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4333 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4334 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4335 
   4336 	if (sc->sc_phytype == WMPHY_I217) {
   4337 		/* Restore SMBus frequency */
    4338 		if (freq--) {
   4339 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4340 			    | HV_SMB_ADDR_FREQ_HIGH);
   4341 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4342 			    HV_SMB_ADDR_FREQ_LOW);
   4343 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4344 			    HV_SMB_ADDR_FREQ_HIGH);
   4345 		} else
   4346 			DPRINTF(sc, WM_DEBUG_INIT,
   4347 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4348 				device_xname(sc->sc_dev), __func__));
   4349 	}
   4350 
   4351 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4352 	    phy_data);
   4353 }
   4354 
   4355 static int
   4356 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4357 {
   4358 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4359 	uint16_t phy_page = 0;
   4360 	int rv = 0;
   4361 
   4362 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4363 		device_xname(sc->sc_dev), __func__));
   4364 
   4365 	switch (sc->sc_type) {
   4366 	case WM_T_ICH8:
   4367 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4368 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4369 			return 0;
   4370 
   4371 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4372 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4373 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4374 			break;
   4375 		}
   4376 		/* FALLTHROUGH */
   4377 	case WM_T_PCH:
   4378 	case WM_T_PCH2:
   4379 	case WM_T_PCH_LPT:
   4380 	case WM_T_PCH_SPT:
   4381 	case WM_T_PCH_CNP:
   4382 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4383 		break;
   4384 	default:
   4385 		return 0;
   4386 	}
   4387 
   4388 	if ((rv = sc->phy.acquire(sc)) != 0)
   4389 		return rv;
   4390 
   4391 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4392 	if ((reg & sw_cfg_mask) == 0)
   4393 		goto release;
   4394 
   4395 	/*
   4396 	 * Make sure HW does not configure LCD from PHY extended configuration
   4397 	 * before SW configuration
   4398 	 */
   4399 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4400 	if ((sc->sc_type < WM_T_PCH2)
   4401 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4402 		goto release;
   4403 
   4404 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4405 		device_xname(sc->sc_dev), __func__));
   4406 	/* word_addr is in DWORD */
   4407 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4408 
   4409 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4410 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4411 	if (cnf_size == 0)
   4412 		goto release;
   4413 
   4414 	if (((sc->sc_type == WM_T_PCH)
   4415 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4416 	    || (sc->sc_type > WM_T_PCH)) {
   4417 		/*
   4418 		 * HW configures the SMBus address and LEDs when the OEM and
   4419 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4420 		 * are cleared, SW will configure them instead.
   4421 		 */
   4422 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4423 			device_xname(sc->sc_dev), __func__));
   4424 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4425 			goto release;
   4426 
   4427 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4428 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4429 		    (uint16_t)reg);
   4430 		if (rv != 0)
   4431 			goto release;
   4432 	}
   4433 
   4434 	/* Configure LCD from extended configuration region. */
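         	/* Each entry is a (data, address) word pair stored in the NVM. */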
   4435 	for (i = 0; i < cnf_size; i++) {
   4436 		uint16_t reg_data, reg_addr;
   4437 
   4438 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4439 			goto release;
   4440 
    4441 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4442 			goto release;
   4443 
   4444 		if (reg_addr == IGPHY_PAGE_SELECT)
   4445 			phy_page = reg_data;
   4446 
   4447 		reg_addr &= IGPHY_MAXREGADDR;
   4448 		reg_addr |= phy_page;
   4449 
   4450 		KASSERT(sc->phy.writereg_locked != NULL);
   4451 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4452 		    reg_data);
   4453 	}
   4454 
   4455 release:
   4456 	sc->phy.release(sc);
   4457 	return rv;
   4458 }
   4459 
   4460 /*
   4461  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4462  *  @sc:       pointer to the HW structure
    4463  *  @d0_state: true if entering the D0 power state, false for D3
    4464  *
    4465  *  SW configures Gbe Disable and LPLU based on the NVM. The four bits are
    4466  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4467  *  in the NVM determine whether HW should configure LPLU and Gbe Disable.
   4468  */
   4469 int
   4470 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4471 {
   4472 	uint32_t mac_reg;
   4473 	uint16_t oem_reg;
   4474 	int rv;
   4475 
   4476 	if (sc->sc_type < WM_T_PCH)
   4477 		return 0;
   4478 
   4479 	rv = sc->phy.acquire(sc);
   4480 	if (rv != 0)
   4481 		return rv;
   4482 
   4483 	if (sc->sc_type == WM_T_PCH) {
   4484 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4485 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4486 			goto release;
   4487 	}
   4488 
   4489 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4490 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4491 		goto release;
   4492 
   4493 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4494 
   4495 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4496 	if (rv != 0)
   4497 		goto release;
   4498 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4499 
   4500 	if (d0_state) {
   4501 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4502 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4503 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4504 			oem_reg |= HV_OEM_BITS_LPLU;
   4505 	} else {
   4506 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4507 		    != 0)
   4508 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4509 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4510 		    != 0)
   4511 			oem_reg |= HV_OEM_BITS_LPLU;
   4512 	}
   4513 
   4514 	/* Set Restart auto-neg to activate the bits */
   4515 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4516 	    && (wm_phy_resetisblocked(sc) == false))
   4517 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4518 
   4519 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4520 
   4521 release:
   4522 	sc->phy.release(sc);
   4523 
   4524 	return rv;
   4525 }
   4526 
   4527 /* Init hardware bits */
   4528 void
   4529 wm_initialize_hardware_bits(struct wm_softc *sc)
   4530 {
   4531 	uint32_t tarc0, tarc1, reg;
   4532 
   4533 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4534 		device_xname(sc->sc_dev), __func__));
   4535 
    4536 	/* For 82571 variants, 80003 and ICHs */
   4537 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4538 	    || (sc->sc_type >= WM_T_80003)) {
   4539 
   4540 		/* Transmit Descriptor Control 0 */
   4541 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4542 		reg |= TXDCTL_COUNT_DESC;
   4543 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4544 
   4545 		/* Transmit Descriptor Control 1 */
   4546 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4547 		reg |= TXDCTL_COUNT_DESC;
   4548 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4549 
   4550 		/* TARC0 */
   4551 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4552 		switch (sc->sc_type) {
   4553 		case WM_T_82571:
   4554 		case WM_T_82572:
   4555 		case WM_T_82573:
   4556 		case WM_T_82574:
   4557 		case WM_T_82583:
   4558 		case WM_T_80003:
   4559 			/* Clear bits 30..27 */
   4560 			tarc0 &= ~__BITS(30, 27);
   4561 			break;
   4562 		default:
   4563 			break;
   4564 		}
   4565 
   4566 		switch (sc->sc_type) {
   4567 		case WM_T_82571:
   4568 		case WM_T_82572:
   4569 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4570 
   4571 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4572 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4573 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4574 			/* 8257[12] Errata No.7 */
    4575 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4576 
   4577 			/* TARC1 bit 28 */
   4578 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4579 				tarc1 &= ~__BIT(28);
   4580 			else
   4581 				tarc1 |= __BIT(28);
   4582 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4583 
   4584 			/*
   4585 			 * 8257[12] Errata No.13
    4586 			 * Disable Dynamic Clock Gating.
   4587 			 */
   4588 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4589 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4590 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4591 			break;
   4592 		case WM_T_82573:
   4593 		case WM_T_82574:
   4594 		case WM_T_82583:
   4595 			if ((sc->sc_type == WM_T_82574)
   4596 			    || (sc->sc_type == WM_T_82583))
   4597 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4598 
   4599 			/* Extended Device Control */
   4600 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4601 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4602 			reg |= __BIT(22);	/* Set bit 22 */
   4603 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4604 
   4605 			/* Device Control */
   4606 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4607 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4608 
   4609 			/* PCIe Control Register */
   4610 			/*
   4611 			 * 82573 Errata (unknown).
   4612 			 *
   4613 			 * 82574 Errata 25 and 82583 Errata 12
   4614 			 * "Dropped Rx Packets":
    4615 			 *   NVM image 2.1.4 and newer does not have this bug.
   4616 			 */
   4617 			reg = CSR_READ(sc, WMREG_GCR);
   4618 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4619 			CSR_WRITE(sc, WMREG_GCR, reg);
   4620 
   4621 			if ((sc->sc_type == WM_T_82574)
   4622 			    || (sc->sc_type == WM_T_82583)) {
   4623 				/*
   4624 				 * Document says this bit must be set for
   4625 				 * proper operation.
   4626 				 */
   4627 				reg = CSR_READ(sc, WMREG_GCR);
   4628 				reg |= __BIT(22);
   4629 				CSR_WRITE(sc, WMREG_GCR, reg);
   4630 
   4631 				/*
    4632 				 * Apply a workaround for the hardware erratum
    4633 				 * documented in the errata docs: it fixes an
    4634 				 * issue where error-prone or unreliable PCIe
    4635 				 * completions occur, particularly with ASPM
    4636 				 * enabled. Without the fix, the issue can
    4637 				 * cause Tx timeouts.
   4638 				 */
   4639 				reg = CSR_READ(sc, WMREG_GCR2);
   4640 				reg |= __BIT(0);
   4641 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4642 			}
   4643 			break;
   4644 		case WM_T_80003:
   4645 			/* TARC0 */
   4646 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4647 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4648 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4649 
   4650 			/* TARC1 bit 28 */
   4651 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4652 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4653 				tarc1 &= ~__BIT(28);
   4654 			else
   4655 				tarc1 |= __BIT(28);
   4656 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4657 			break;
   4658 		case WM_T_ICH8:
   4659 		case WM_T_ICH9:
   4660 		case WM_T_ICH10:
   4661 		case WM_T_PCH:
   4662 		case WM_T_PCH2:
   4663 		case WM_T_PCH_LPT:
   4664 		case WM_T_PCH_SPT:
   4665 		case WM_T_PCH_CNP:
   4666 			/* TARC0 */
   4667 			if (sc->sc_type == WM_T_ICH8) {
   4668 				/* Set TARC0 bits 29 and 28 */
   4669 				tarc0 |= __BITS(29, 28);
   4670 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4671 				tarc0 |= __BIT(29);
   4672 				/*
    4673 				 * Clear bit 28; from Linux.
   4674 				 * See I218/I219 spec update
   4675 				 * "5. Buffer Overrun While the I219 is
   4676 				 * Processing DMA Transactions"
   4677 				 */
   4678 				tarc0 &= ~__BIT(28);
   4679 			}
   4680 			/* Set TARC0 bits 23,24,26,27 */
   4681 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4682 
   4683 			/* CTRL_EXT */
   4684 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4685 			reg |= __BIT(22);	/* Set bit 22 */
   4686 			/*
   4687 			 * Enable PHY low-power state when MAC is at D3
   4688 			 * w/o WoL
   4689 			 */
   4690 			if (sc->sc_type >= WM_T_PCH)
   4691 				reg |= CTRL_EXT_PHYPDEN;
   4692 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4693 
   4694 			/* TARC1 */
   4695 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4696 			/* bit 28 */
   4697 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4698 				tarc1 &= ~__BIT(28);
   4699 			else
   4700 				tarc1 |= __BIT(28);
   4701 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4702 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4703 
   4704 			/* Device Status */
   4705 			if (sc->sc_type == WM_T_ICH8) {
   4706 				reg = CSR_READ(sc, WMREG_STATUS);
   4707 				reg &= ~__BIT(31);
   4708 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4709 
   4710 			}
   4711 
   4712 			/* IOSFPC */
   4713 			if (sc->sc_type == WM_T_PCH_SPT) {
   4714 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4715 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4716 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4717 			}
   4718 			/*
    4719 			 * To work around a descriptor data corruption issue
    4720 			 * seen during NFS v2 UDP traffic, simply disable the
    4721 			 * NFS filtering capability.
   4722 			 */
   4723 			reg = CSR_READ(sc, WMREG_RFCTL);
   4724 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4725 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4726 			break;
   4727 		default:
   4728 			break;
   4729 		}
   4730 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4731 
   4732 		switch (sc->sc_type) {
   4733 		/*
   4734 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4735 		 * Avoid RSS Hash Value bug.
   4736 		 */
   4737 		case WM_T_82571:
   4738 		case WM_T_82572:
   4739 		case WM_T_82573:
   4740 		case WM_T_80003:
   4741 		case WM_T_ICH8:
   4742 			reg = CSR_READ(sc, WMREG_RFCTL);
    4743 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4744 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4745 			break;
   4746 		case WM_T_82574:
    4747 			/* Use extended Rx descriptors. */
   4748 			reg = CSR_READ(sc, WMREG_RFCTL);
   4749 			reg |= WMREG_RFCTL_EXSTEN;
   4750 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4751 			break;
   4752 		default:
   4753 			break;
   4754 		}
   4755 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4756 		/*
   4757 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4758 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4759 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4760 		 * Correctly by the Device"
   4761 		 *
   4762 		 * I354(C2000) Errata AVR53:
   4763 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4764 		 * Hang"
   4765 		 */
   4766 		reg = CSR_READ(sc, WMREG_RFCTL);
   4767 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4768 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4769 	}
   4770 }
   4771 
   4772 static uint32_t
   4773 wm_rxpbs_adjust_82580(uint32_t val)
   4774 {
   4775 	uint32_t rv = 0;
   4776 
   4777 	if (val < __arraycount(wm_82580_rxpbs_table))
   4778 		rv = wm_82580_rxpbs_table[val];
   4779 
   4780 	return rv;
   4781 }
   4782 
   4783 /*
   4784  * wm_reset_phy:
   4785  *
    4786  *	Generic PHY reset function.
   4787  *	Same as e1000_phy_hw_reset_generic()
   4788  */
   4789 static int
   4790 wm_reset_phy(struct wm_softc *sc)
   4791 {
   4792 	uint32_t reg;
   4793 
   4794 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4795 		device_xname(sc->sc_dev), __func__));
   4796 	if (wm_phy_resetisblocked(sc))
   4797 		return -1;
   4798 
   4799 	sc->phy.acquire(sc);
   4800 
   4801 	reg = CSR_READ(sc, WMREG_CTRL);
   4802 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4803 	CSR_WRITE_FLUSH(sc);
   4804 
   4805 	delay(sc->phy.reset_delay_us);
   4806 
   4807 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4808 	CSR_WRITE_FLUSH(sc);
   4809 
   4810 	delay(150);
   4811 
   4812 	sc->phy.release(sc);
   4813 
   4814 	wm_get_cfg_done(sc);
   4815 	wm_phy_post_reset(sc);
   4816 
   4817 	return 0;
   4818 }
   4819 
   4820 /*
    4821  * Used only by WM_T_PCH_SPT, which does not use multiqueue,
    4822  * so it is enough to check sc->sc_queue[0].
   4823  */
   4824 static void
   4825 wm_flush_desc_rings(struct wm_softc *sc)
   4826 {
   4827 	pcireg_t preg;
   4828 	uint32_t reg;
   4829 	struct wm_txqueue *txq;
   4830 	wiseman_txdesc_t *txd;
   4831 	int nexttx;
   4832 	uint32_t rctl;
   4833 
   4834 	/* First, disable MULR fix in FEXTNVM11 */
   4835 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4836 	reg |= FEXTNVM11_DIS_MULRFIX;
   4837 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4838 
   4839 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4840 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4841 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4842 		return;
   4843 
   4844 	/* TX */
   4845 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4846 	    preg, reg);
   4847 	reg = CSR_READ(sc, WMREG_TCTL);
   4848 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4849 
   4850 	txq = &sc->sc_queue[0].wmq_txq;
   4851 	nexttx = txq->txq_next;
   4852 	txd = &txq->txq_descs[nexttx];
   4853 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4854 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4855 	txd->wtx_fields.wtxu_status = 0;
   4856 	txd->wtx_fields.wtxu_options = 0;
   4857 	txd->wtx_fields.wtxu_vlan = 0;
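
         	/*
         	 * A note on the dummy descriptor built above: its buffer
         	 * address is the DMA address of the descriptor itself, and
         	 * WTX_CMD_IFCS | 512 queues a 512-byte transmit with FCS
         	 * insertion.  The payload is irrelevant; bumping TDT below
         	 * just makes the hardware process one descriptor so that the
         	 * pending flush request (DESCRING_STATUS_FLUSH_REQ) can
         	 * complete.
         	 */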
   4858 
   4859 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4860 	    BUS_SPACE_BARRIER_WRITE);
   4861 
   4862 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4863 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4864 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4865 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4866 	delay(250);
   4867 
   4868 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4869 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4870 		return;
   4871 
   4872 	/* RX */
   4873 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4874 	rctl = CSR_READ(sc, WMREG_RCTL);
   4875 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4876 	CSR_WRITE_FLUSH(sc);
   4877 	delay(150);
   4878 
   4879 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4880 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4881 	reg &= 0xffffc000;
   4882 	/*
   4883 	 * Update thresholds: prefetch threshold to 31, host threshold
   4884 	 * to 1 and make sure the granularity is "descriptors" and not
   4885 	 * "cache lines"
   4886 	 */
   4887 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4888 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4889 
   4890 	/* Momentarily enable the RX ring for the changes to take effect */
   4891 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4892 	CSR_WRITE_FLUSH(sc);
   4893 	delay(150);
   4894 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4895 }
   4896 
   4897 /*
   4898  * wm_reset:
   4899  *
   4900  *	Reset the i82542 chip.
   4901  */
   4902 static void
   4903 wm_reset(struct wm_softc *sc)
   4904 {
   4905 	int phy_reset = 0;
   4906 	int i, error = 0;
   4907 	uint32_t reg;
   4908 	uint16_t kmreg;
   4909 	int rv;
   4910 
   4911 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4912 		device_xname(sc->sc_dev), __func__));
   4913 	KASSERT(sc->sc_type != 0);
   4914 
   4915 	/*
   4916 	 * Allocate on-chip memory according to the MTU size.
   4917 	 * The Packet Buffer Allocation register must be written
   4918 	 * before the chip is reset.
   4919 	 */
   4920 	switch (sc->sc_type) {
   4921 	case WM_T_82547:
   4922 	case WM_T_82547_2:
   4923 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4924 		    PBA_22K : PBA_30K;
   4925 		for (i = 0; i < sc->sc_nqueues; i++) {
   4926 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4927 			txq->txq_fifo_head = 0;
   4928 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4929 			txq->txq_fifo_size =
   4930 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4931 			txq->txq_fifo_stall = 0;
   4932 		}
   4933 		break;
   4934 	case WM_T_82571:
   4935 	case WM_T_82572:
   4936 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4937 	case WM_T_80003:
   4938 		sc->sc_pba = PBA_32K;
   4939 		break;
   4940 	case WM_T_82573:
   4941 		sc->sc_pba = PBA_12K;
   4942 		break;
   4943 	case WM_T_82574:
   4944 	case WM_T_82583:
   4945 		sc->sc_pba = PBA_20K;
   4946 		break;
   4947 	case WM_T_82576:
   4948 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4949 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4950 		break;
   4951 	case WM_T_82580:
   4952 	case WM_T_I350:
   4953 	case WM_T_I354:
   4954 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4955 		break;
   4956 	case WM_T_I210:
   4957 	case WM_T_I211:
   4958 		sc->sc_pba = PBA_34K;
   4959 		break;
   4960 	case WM_T_ICH8:
   4961 		/* Workaround for a bit corruption issue in FIFO memory */
   4962 		sc->sc_pba = PBA_8K;
   4963 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4964 		break;
   4965 	case WM_T_ICH9:
   4966 	case WM_T_ICH10:
   4967 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4968 		    PBA_14K : PBA_10K;
   4969 		break;
   4970 	case WM_T_PCH:
   4971 	case WM_T_PCH2:	/* XXX 14K? */
   4972 	case WM_T_PCH_LPT:
   4973 	case WM_T_PCH_SPT:
   4974 	case WM_T_PCH_CNP:
   4975 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   4976 		    PBA_12K : PBA_26K;
   4977 		break;
   4978 	default:
   4979 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4980 		    PBA_40K : PBA_48K;
   4981 		break;
   4982 	}
   4983 	/*
   4984 	 * Only old or non-multiqueue devices have the PBA register
   4985 	 * XXX Need special handling for 82575.
   4986 	 */
   4987 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4988 	    || (sc->sc_type == WM_T_82575))
   4989 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4990 
   4991 	/* Prevent the PCI-E bus from sticking */
   4992 	if (sc->sc_flags & WM_F_PCIE) {
   4993 		int timeout = 800;
   4994 
   4995 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4996 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4997 
   4998 		while (timeout--) {
   4999 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5000 			    == 0)
   5001 				break;
   5002 			delay(100);
   5003 		}
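         		/*
         		 * Note: the loop above polls every 100us with an
         		 * 800-iteration budget, i.e. it waits up to roughly
         		 * 80ms for the GIO master to go quiescent before
         		 * giving up.
         		 */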
   5004 		if (timeout == 0)
   5005 			device_printf(sc->sc_dev,
   5006 			    "failed to disable busmastering\n");
   5007 	}
   5008 
   5009 	/* Set the completion timeout for interface */
   5010 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5011 	    || (sc->sc_type == WM_T_82580)
   5012 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5013 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5014 		wm_set_pcie_completion_timeout(sc);
   5015 
   5016 	/* Clear interrupt */
   5017 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5018 	if (wm_is_using_msix(sc)) {
   5019 		if (sc->sc_type != WM_T_82574) {
   5020 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5021 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5022 		} else
   5023 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5024 	}
   5025 
   5026 	/* Stop the transmit and receive processes. */
   5027 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5028 	sc->sc_rctl &= ~RCTL_EN;
   5029 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5030 	CSR_WRITE_FLUSH(sc);
   5031 
   5032 	/* XXX set_tbi_sbp_82543() */
   5033 
   5034 	delay(10*1000);
   5035 
   5036 	/* Must acquire the MDIO ownership before MAC reset */
   5037 	switch (sc->sc_type) {
   5038 	case WM_T_82573:
   5039 	case WM_T_82574:
   5040 	case WM_T_82583:
   5041 		error = wm_get_hw_semaphore_82573(sc);
   5042 		break;
   5043 	default:
   5044 		break;
   5045 	}
   5046 
   5047 	/*
   5048 	 * 82541 Errata 29? & 82547 Errata 28?
   5049 	 * See also the description about PHY_RST bit in CTRL register
   5050 	 * in 8254x_GBe_SDM.pdf.
   5051 	 */
   5052 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5053 		CSR_WRITE(sc, WMREG_CTRL,
   5054 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5055 		CSR_WRITE_FLUSH(sc);
   5056 		delay(5000);
   5057 	}
   5058 
   5059 	switch (sc->sc_type) {
   5060 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5061 	case WM_T_82541:
   5062 	case WM_T_82541_2:
   5063 	case WM_T_82547:
   5064 	case WM_T_82547_2:
   5065 		/*
   5066 		 * On some chipsets, a reset through a memory-mapped write
   5067 		 * cycle can cause the chip to reset before completing the
    5068 		 * write cycle. This causes major headaches that can be avoided
   5069 		 * by issuing the reset via indirect register writes through
   5070 		 * I/O space.
   5071 		 *
   5072 		 * So, if we successfully mapped the I/O BAR at attach time,
   5073 		 * use that. Otherwise, try our luck with a memory-mapped
   5074 		 * reset.
   5075 		 */
   5076 		if (sc->sc_flags & WM_F_IOH_VALID)
   5077 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5078 		else
   5079 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5080 		break;
   5081 	case WM_T_82545_3:
   5082 	case WM_T_82546_3:
   5083 		/* Use the shadow control register on these chips. */
   5084 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5085 		break;
   5086 	case WM_T_80003:
   5087 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5088 		sc->phy.acquire(sc);
   5089 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5090 		sc->phy.release(sc);
   5091 		break;
   5092 	case WM_T_ICH8:
   5093 	case WM_T_ICH9:
   5094 	case WM_T_ICH10:
   5095 	case WM_T_PCH:
   5096 	case WM_T_PCH2:
   5097 	case WM_T_PCH_LPT:
   5098 	case WM_T_PCH_SPT:
   5099 	case WM_T_PCH_CNP:
   5100 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5101 		if (wm_phy_resetisblocked(sc) == false) {
   5102 			/*
   5103 			 * Gate automatic PHY configuration by hardware on
   5104 			 * non-managed 82579
   5105 			 */
   5106 			if ((sc->sc_type == WM_T_PCH2)
   5107 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5108 				== 0))
   5109 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5110 
   5111 			reg |= CTRL_PHY_RESET;
   5112 			phy_reset = 1;
   5113 		} else
   5114 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5115 		sc->phy.acquire(sc);
   5116 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5117 		/* Don't insert a completion barrier during reset */
   5118 		delay(20*1000);
   5119 		mutex_exit(sc->sc_ich_phymtx);
   5120 		break;
   5121 	case WM_T_82580:
   5122 	case WM_T_I350:
   5123 	case WM_T_I354:
   5124 	case WM_T_I210:
   5125 	case WM_T_I211:
   5126 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5127 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5128 			CSR_WRITE_FLUSH(sc);
   5129 		delay(5000);
   5130 		break;
   5131 	case WM_T_82542_2_0:
   5132 	case WM_T_82542_2_1:
   5133 	case WM_T_82543:
   5134 	case WM_T_82540:
   5135 	case WM_T_82545:
   5136 	case WM_T_82546:
   5137 	case WM_T_82571:
   5138 	case WM_T_82572:
   5139 	case WM_T_82573:
   5140 	case WM_T_82574:
   5141 	case WM_T_82575:
   5142 	case WM_T_82576:
   5143 	case WM_T_82583:
   5144 	default:
   5145 		/* Everything else can safely use the documented method. */
   5146 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5147 		break;
   5148 	}
   5149 
   5150 	/* Must release the MDIO ownership after MAC reset */
   5151 	switch (sc->sc_type) {
   5152 	case WM_T_82573:
   5153 	case WM_T_82574:
   5154 	case WM_T_82583:
   5155 		if (error == 0)
   5156 			wm_put_hw_semaphore_82573(sc);
   5157 		break;
   5158 	default:
   5159 		break;
   5160 	}
   5161 
   5162 	/* Set Phy Config Counter to 50msec */
   5163 	if (sc->sc_type == WM_T_PCH2) {
   5164 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5165 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5166 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5167 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5168 	}
   5169 
   5170 	if (phy_reset != 0)
   5171 		wm_get_cfg_done(sc);
   5172 
   5173 	/* Reload EEPROM */
   5174 	switch (sc->sc_type) {
   5175 	case WM_T_82542_2_0:
   5176 	case WM_T_82542_2_1:
   5177 	case WM_T_82543:
   5178 	case WM_T_82544:
   5179 		delay(10);
   5180 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5181 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5182 		CSR_WRITE_FLUSH(sc);
   5183 		delay(2000);
   5184 		break;
   5185 	case WM_T_82540:
   5186 	case WM_T_82545:
   5187 	case WM_T_82545_3:
   5188 	case WM_T_82546:
   5189 	case WM_T_82546_3:
   5190 		delay(5*1000);
   5191 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5192 		break;
   5193 	case WM_T_82541:
   5194 	case WM_T_82541_2:
   5195 	case WM_T_82547:
   5196 	case WM_T_82547_2:
   5197 		delay(20000);
   5198 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5199 		break;
   5200 	case WM_T_82571:
   5201 	case WM_T_82572:
   5202 	case WM_T_82573:
   5203 	case WM_T_82574:
   5204 	case WM_T_82583:
   5205 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5206 			delay(10);
   5207 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5208 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5209 			CSR_WRITE_FLUSH(sc);
   5210 		}
   5211 		/* check EECD_EE_AUTORD */
   5212 		wm_get_auto_rd_done(sc);
   5213 		/*
    5214 		 * PHY configuration from the NVM starts only after
    5215 		 * EECD_AUTO_RD is set.
   5216 		 */
   5217 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5218 		    || (sc->sc_type == WM_T_82583))
   5219 			delay(25*1000);
   5220 		break;
   5221 	case WM_T_82575:
   5222 	case WM_T_82576:
   5223 	case WM_T_82580:
   5224 	case WM_T_I350:
   5225 	case WM_T_I354:
   5226 	case WM_T_I210:
   5227 	case WM_T_I211:
   5228 	case WM_T_80003:
   5229 		/* check EECD_EE_AUTORD */
   5230 		wm_get_auto_rd_done(sc);
   5231 		break;
   5232 	case WM_T_ICH8:
   5233 	case WM_T_ICH9:
   5234 	case WM_T_ICH10:
   5235 	case WM_T_PCH:
   5236 	case WM_T_PCH2:
   5237 	case WM_T_PCH_LPT:
   5238 	case WM_T_PCH_SPT:
   5239 	case WM_T_PCH_CNP:
   5240 		break;
   5241 	default:
   5242 		panic("%s: unknown type\n", __func__);
   5243 	}
   5244 
   5245 	/* Check whether EEPROM is present or not */
   5246 	switch (sc->sc_type) {
   5247 	case WM_T_82575:
   5248 	case WM_T_82576:
   5249 	case WM_T_82580:
   5250 	case WM_T_I350:
   5251 	case WM_T_I354:
   5252 	case WM_T_ICH8:
   5253 	case WM_T_ICH9:
   5254 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5255 			/* Not found */
   5256 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5257 			if (sc->sc_type == WM_T_82575)
   5258 				wm_reset_init_script_82575(sc);
   5259 		}
   5260 		break;
   5261 	default:
   5262 		break;
   5263 	}
   5264 
   5265 	if (phy_reset != 0)
   5266 		wm_phy_post_reset(sc);
   5267 
   5268 	if ((sc->sc_type == WM_T_82580)
   5269 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5270 		/* Clear global device reset status bit */
   5271 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5272 	}
   5273 
   5274 	/* Clear any pending interrupt events. */
   5275 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5276 	reg = CSR_READ(sc, WMREG_ICR);
   5277 	if (wm_is_using_msix(sc)) {
   5278 		if (sc->sc_type != WM_T_82574) {
   5279 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5280 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5281 		} else
   5282 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5283 	}
   5284 
   5285 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5286 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5287 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5288 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5289 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5290 		reg |= KABGTXD_BGSQLBIAS;
   5291 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5292 	}
   5293 
   5294 	/* Reload sc_ctrl */
   5295 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5296 
   5297 	wm_set_eee(sc);
   5298 
   5299 	/*
   5300 	 * For PCH, this write will make sure that any noise will be detected
   5301 	 * as a CRC error and be dropped rather than show up as a bad packet
   5302 	 * to the DMA engine
   5303 	 */
   5304 	if (sc->sc_type == WM_T_PCH)
   5305 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5306 
   5307 	if (sc->sc_type >= WM_T_82544)
   5308 		CSR_WRITE(sc, WMREG_WUC, 0);
   5309 
   5310 	if (sc->sc_type < WM_T_82575)
   5311 		wm_disable_aspm(sc); /* Workaround for some chips */
   5312 
   5313 	wm_reset_mdicnfg_82580(sc);
   5314 
   5315 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5316 		wm_pll_workaround_i210(sc);
   5317 
   5318 	if (sc->sc_type == WM_T_80003) {
   5319 		/* Default to TRUE to enable the MDIC W/A */
   5320 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5321 
   5322 		rv = wm_kmrn_readreg(sc,
   5323 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5324 		if (rv == 0) {
   5325 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5326 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5327 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5328 			else
   5329 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5330 		}
   5331 	}
   5332 }
   5333 
   5334 /*
   5335  * wm_add_rxbuf:
   5336  *
    5337  *	Add a receive buffer to the indicated descriptor.
   5338  */
   5339 static int
   5340 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5341 {
   5342 	struct wm_softc *sc = rxq->rxq_sc;
   5343 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5344 	struct mbuf *m;
   5345 	int error;
   5346 
   5347 	KASSERT(mutex_owned(rxq->rxq_lock));
   5348 
   5349 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5350 	if (m == NULL)
   5351 		return ENOBUFS;
   5352 
   5353 	MCLGET(m, M_DONTWAIT);
   5354 	if ((m->m_flags & M_EXT) == 0) {
   5355 		m_freem(m);
   5356 		return ENOBUFS;
   5357 	}
   5358 
   5359 	if (rxs->rxs_mbuf != NULL)
   5360 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5361 
   5362 	rxs->rxs_mbuf = m;
   5363 
   5364 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5365 	/*
   5366 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5367 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5368 	 */
   5369 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5370 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5371 	if (error) {
   5372 		/* XXX XXX XXX */
   5373 		aprint_error_dev(sc->sc_dev,
   5374 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5375 		panic("wm_add_rxbuf");
   5376 	}
   5377 
   5378 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5379 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5380 
   5381 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5382 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5383 			wm_init_rxdesc(rxq, idx);
   5384 	} else
   5385 		wm_init_rxdesc(rxq, idx);
   5386 
   5387 	return 0;
   5388 }
   5389 
   5390 /*
   5391  * wm_rxdrain:
   5392  *
   5393  *	Drain the receive queue.
   5394  */
   5395 static void
   5396 wm_rxdrain(struct wm_rxqueue *rxq)
   5397 {
   5398 	struct wm_softc *sc = rxq->rxq_sc;
   5399 	struct wm_rxsoft *rxs;
   5400 	int i;
   5401 
   5402 	KASSERT(mutex_owned(rxq->rxq_lock));
   5403 
   5404 	for (i = 0; i < WM_NRXDESC; i++) {
   5405 		rxs = &rxq->rxq_soft[i];
   5406 		if (rxs->rxs_mbuf != NULL) {
   5407 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5408 			m_freem(rxs->rxs_mbuf);
   5409 			rxs->rxs_mbuf = NULL;
   5410 		}
   5411 	}
   5412 }
   5413 
   5414 /*
    5415  * Set up registers for RSS.
    5416  *
    5417  * XXX VMDq is not yet supported.
   5418  */
   5419 static void
   5420 wm_init_rss(struct wm_softc *sc)
   5421 {
   5422 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5423 	int i;
   5424 
   5425 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5426 
   5427 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5428 		unsigned int qid, reta_ent;
   5429 
   5430 		qid  = i % sc->sc_nqueues;
   5431 		switch (sc->sc_type) {
   5432 		case WM_T_82574:
   5433 			reta_ent = __SHIFTIN(qid,
   5434 			    RETA_ENT_QINDEX_MASK_82574);
   5435 			break;
   5436 		case WM_T_82575:
   5437 			reta_ent = __SHIFTIN(qid,
   5438 			    RETA_ENT_QINDEX1_MASK_82575);
   5439 			break;
   5440 		default:
   5441 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5442 			break;
   5443 		}
   5444 
   5445 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5446 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5447 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5448 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5449 	}
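
         	/*
         	 * Illustration of the mapping above: with sc_nqueues == 4, the
         	 * redirection table entries cycle 0, 1, 2, 3, 0, 1, ... so RSS
         	 * hash values spread round-robin across the four queues.
         	 */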
   5450 
   5451 	rss_getkey((uint8_t *)rss_key);
   5452 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5453 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5454 
   5455 	if (sc->sc_type == WM_T_82574)
   5456 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5457 	else
   5458 		mrqc = MRQC_ENABLE_RSS_MQ;
   5459 
   5460 	/*
   5461 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5462 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5463 	 */
   5464 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5465 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5466 #if 0
   5467 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5468 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5469 #endif
   5470 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5471 
   5472 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5473 }
   5474 
   5475 /*
    5476  * Adjust the TX and RX queue numbers which the system actually uses.
    5477  *
    5478  * The numbers are affected by the following parameters:
    5479  *     - The number of hardware queues
   5480  *     - The number of MSI-X vectors (= "nvectors" argument)
   5481  *     - ncpu
   5482  */
   5483 static void
   5484 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5485 {
   5486 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5487 
   5488 	if (nvectors < 2) {
   5489 		sc->sc_nqueues = 1;
   5490 		return;
   5491 	}
   5492 
   5493 	switch (sc->sc_type) {
   5494 	case WM_T_82572:
   5495 		hw_ntxqueues = 2;
   5496 		hw_nrxqueues = 2;
   5497 		break;
   5498 	case WM_T_82574:
   5499 		hw_ntxqueues = 2;
   5500 		hw_nrxqueues = 2;
   5501 		break;
   5502 	case WM_T_82575:
   5503 		hw_ntxqueues = 4;
   5504 		hw_nrxqueues = 4;
   5505 		break;
   5506 	case WM_T_82576:
   5507 		hw_ntxqueues = 16;
   5508 		hw_nrxqueues = 16;
   5509 		break;
   5510 	case WM_T_82580:
   5511 	case WM_T_I350:
   5512 	case WM_T_I354:
   5513 		hw_ntxqueues = 8;
   5514 		hw_nrxqueues = 8;
   5515 		break;
   5516 	case WM_T_I210:
   5517 		hw_ntxqueues = 4;
   5518 		hw_nrxqueues = 4;
   5519 		break;
   5520 	case WM_T_I211:
   5521 		hw_ntxqueues = 2;
   5522 		hw_nrxqueues = 2;
   5523 		break;
   5524 		/*
    5525 		 * The following Ethernet controllers do not support
    5526 		 * MSI-X, so this driver does not use multiqueue on them:
   5527 		 *     - WM_T_80003
   5528 		 *     - WM_T_ICH8
   5529 		 *     - WM_T_ICH9
   5530 		 *     - WM_T_ICH10
   5531 		 *     - WM_T_PCH
   5532 		 *     - WM_T_PCH2
   5533 		 *     - WM_T_PCH_LPT
   5534 		 */
   5535 	default:
   5536 		hw_ntxqueues = 1;
   5537 		hw_nrxqueues = 1;
   5538 		break;
   5539 	}
   5540 
   5541 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5542 
   5543 	/*
    5544 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    5545 	 * limit the number of queues actually used.
   5546 	 */
   5547 	if (nvectors < hw_nqueues + 1)
   5548 		sc->sc_nqueues = nvectors - 1;
   5549 	else
   5550 		sc->sc_nqueues = hw_nqueues;
   5551 
   5552 	/*
    5553 	 * Since more queues than CPUs cannot improve scaling, we limit
    5554 	 * the number of queues actually used.
   5555 	 */
   5556 	if (ncpu < sc->sc_nqueues)
   5557 		sc->sc_nqueues = ncpu;
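
         	/*
         	 * Worked example: an 82576 (16 hardware queues) attached with
         	 * nvectors == 5 on an 8-CPU machine ends up with
         	 * sc_nqueues = min(16, 5 - 1, 8) = 4, which keeps one MSI-X
         	 * vector free for the link interrupt.
         	 */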
   5558 }
   5559 
   5560 static inline bool
   5561 wm_is_using_msix(struct wm_softc *sc)
   5562 {
   5563 
   5564 	return (sc->sc_nintrs > 1);
   5565 }
   5566 
   5567 static inline bool
   5568 wm_is_using_multiqueue(struct wm_softc *sc)
   5569 {
   5570 
   5571 	return (sc->sc_nqueues > 1);
   5572 }
   5573 
   5574 static int
   5575 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5576 {
   5577 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5578 
   5579 	wmq->wmq_id = qidx;
   5580 	wmq->wmq_intr_idx = intr_idx;
   5581 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5582 	    wm_handle_queue, wmq);
   5583 	if (wmq->wmq_si != NULL)
   5584 		return 0;
   5585 
   5586 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5587 	    wmq->wmq_id);
   5588 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5589 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5590 	return ENOMEM;
   5591 }
   5592 
   5593 /*
   5594  * Both single interrupt MSI and INTx can use this function.
   5595  */
   5596 static int
   5597 wm_setup_legacy(struct wm_softc *sc)
   5598 {
   5599 	pci_chipset_tag_t pc = sc->sc_pc;
   5600 	const char *intrstr = NULL;
   5601 	char intrbuf[PCI_INTRSTR_LEN];
   5602 	int error;
   5603 
   5604 	error = wm_alloc_txrx_queues(sc);
   5605 	if (error) {
   5606 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5607 		    error);
   5608 		return ENOMEM;
   5609 	}
   5610 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5611 	    sizeof(intrbuf));
   5612 #ifdef WM_MPSAFE
   5613 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5614 #endif
   5615 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5616 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5617 	if (sc->sc_ihs[0] == NULL) {
   5618 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5619 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5620 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5621 		return ENOMEM;
   5622 	}
   5623 
   5624 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5625 	sc->sc_nintrs = 1;
   5626 
   5627 	return wm_softint_establish_queue(sc, 0, 0);
   5628 }
   5629 
   5630 static int
   5631 wm_setup_msix(struct wm_softc *sc)
   5632 {
   5633 	void *vih;
   5634 	kcpuset_t *affinity;
   5635 	int qidx, error, intr_idx, txrx_established;
   5636 	pci_chipset_tag_t pc = sc->sc_pc;
   5637 	const char *intrstr = NULL;
   5638 	char intrbuf[PCI_INTRSTR_LEN];
   5639 	char intr_xname[INTRDEVNAMEBUF];
   5640 
   5641 	if (sc->sc_nqueues < ncpu) {
   5642 		/*
   5643 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5644 		 * interrupts starts from CPU#1.
   5645 		 */
   5646 		sc->sc_affinity_offset = 1;
   5647 	} else {
   5648 		/*
    5649 		 * In this case, the device uses all CPUs, so for readability
    5650 		 * we align the affinity cpu_index with the MSI-X vector number.
   5651 		 */
   5652 		sc->sc_affinity_offset = 0;
   5653 	}
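
         	/*
         	 * Example of the resulting round-robin assignment: with 4
         	 * queues on an 8-CPU machine, sc_affinity_offset is 1, so the
         	 * TXRX0..TXRX3 vectors are bound to CPU#1..CPU#4 via
         	 * (sc_affinity_offset + intr_idx) % ncpu below.
         	 */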
   5654 
   5655 	error = wm_alloc_txrx_queues(sc);
   5656 	if (error) {
   5657 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5658 		    error);
   5659 		return ENOMEM;
   5660 	}
   5661 
   5662 	kcpuset_create(&affinity, false);
   5663 	intr_idx = 0;
   5664 
   5665 	/*
   5666 	 * TX and RX
   5667 	 */
   5668 	txrx_established = 0;
   5669 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5670 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5671 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5672 
   5673 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5674 		    sizeof(intrbuf));
   5675 #ifdef WM_MPSAFE
   5676 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5677 		    PCI_INTR_MPSAFE, true);
   5678 #endif
   5679 		memset(intr_xname, 0, sizeof(intr_xname));
   5680 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5681 		    device_xname(sc->sc_dev), qidx);
   5682 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5683 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5684 		if (vih == NULL) {
   5685 			aprint_error_dev(sc->sc_dev,
   5686 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5687 			    intrstr ? " at " : "",
   5688 			    intrstr ? intrstr : "");
   5689 
   5690 			goto fail;
   5691 		}
   5692 		kcpuset_zero(affinity);
   5693 		/* Round-robin affinity */
   5694 		kcpuset_set(affinity, affinity_to);
   5695 		error = interrupt_distribute(vih, affinity, NULL);
   5696 		if (error == 0) {
   5697 			aprint_normal_dev(sc->sc_dev,
   5698 			    "for TX and RX interrupting at %s affinity to %u\n",
   5699 			    intrstr, affinity_to);
   5700 		} else {
   5701 			aprint_normal_dev(sc->sc_dev,
   5702 			    "for TX and RX interrupting at %s\n", intrstr);
   5703 		}
   5704 		sc->sc_ihs[intr_idx] = vih;
   5705 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5706 			goto fail;
   5707 		txrx_established++;
   5708 		intr_idx++;
   5709 	}
   5710 
   5711 	/* LINK */
   5712 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5713 	    sizeof(intrbuf));
   5714 #ifdef WM_MPSAFE
   5715 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5716 #endif
   5717 	memset(intr_xname, 0, sizeof(intr_xname));
   5718 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5719 	    device_xname(sc->sc_dev));
   5720 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5721 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5722 	if (vih == NULL) {
   5723 		aprint_error_dev(sc->sc_dev,
   5724 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5725 		    intrstr ? " at " : "",
   5726 		    intrstr ? intrstr : "");
   5727 
   5728 		goto fail;
   5729 	}
   5730 	/* Keep default affinity to LINK interrupt */
   5731 	aprint_normal_dev(sc->sc_dev,
   5732 	    "for LINK interrupting at %s\n", intrstr);
   5733 	sc->sc_ihs[intr_idx] = vih;
   5734 	sc->sc_link_intr_idx = intr_idx;
   5735 
   5736 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5737 	kcpuset_destroy(affinity);
   5738 	return 0;
   5739 
   5740  fail:
   5741 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5742 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5743 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5744 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5745 	}
   5746 
   5747 	kcpuset_destroy(affinity);
   5748 	return ENOMEM;
   5749 }
   5750 
   5751 static void
   5752 wm_unset_stopping_flags(struct wm_softc *sc)
   5753 {
   5754 	int i;
   5755 
   5756 	KASSERT(WM_CORE_LOCKED(sc));
   5757 
   5758 	/* Must unset stopping flags in ascending order. */
   5759 	for (i = 0; i < sc->sc_nqueues; i++) {
   5760 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5761 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5762 
   5763 		mutex_enter(txq->txq_lock);
   5764 		txq->txq_stopping = false;
   5765 		mutex_exit(txq->txq_lock);
   5766 
   5767 		mutex_enter(rxq->rxq_lock);
   5768 		rxq->rxq_stopping = false;
   5769 		mutex_exit(rxq->rxq_lock);
   5770 	}
   5771 
   5772 	sc->sc_core_stopping = false;
   5773 }
   5774 
   5775 static void
   5776 wm_set_stopping_flags(struct wm_softc *sc)
   5777 {
   5778 	int i;
   5779 
   5780 	KASSERT(WM_CORE_LOCKED(sc));
   5781 
   5782 	sc->sc_core_stopping = true;
   5783 
   5784 	/* Must set stopping flags in ascending order. */
   5785 	for (i = 0; i < sc->sc_nqueues; i++) {
   5786 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5787 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5788 
   5789 		mutex_enter(rxq->rxq_lock);
   5790 		rxq->rxq_stopping = true;
   5791 		mutex_exit(rxq->rxq_lock);
   5792 
   5793 		mutex_enter(txq->txq_lock);
   5794 		txq->txq_stopping = true;
   5795 		mutex_exit(txq->txq_lock);
   5796 	}
   5797 }
   5798 
   5799 /*
   5800  * Write interrupt interval value to ITR or EITR
   5801  */
   5802 static void
   5803 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5804 {
   5805 
   5806 	if (!wmq->wmq_set_itr)
   5807 		return;
   5808 
   5809 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5810 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5811 
   5812 		/*
    5813 		 * The 82575 doesn't have the CNT_INGR field,
    5814 		 * so overwrite the counter field in software.
   5815 		 */
   5816 		if (sc->sc_type == WM_T_82575)
   5817 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5818 		else
   5819 			eitr |= EITR_CNT_INGR;
   5820 
   5821 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5822 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5823 		/*
    5824 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5825 		 * the multiqueue function with MSI-X.
   5826 		 */
   5827 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5828 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5829 	} else {
   5830 		KASSERT(wmq->wmq_id == 0);
   5831 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5832 	}
   5833 
   5834 	wmq->wmq_set_itr = false;
   5835 }
   5836 
   5837 /*
   5838  * TODO
    5839  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5840  * but it does not fit wm(4) well, so AIM stays disabled until we find
    5841  * an appropriate way to calculate the ITR.
   5842  */
   5843 /*
    5844  * Calculate the interrupt interval value to be written to the register by
    5845  * wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5846  */
   5847 static void
   5848 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5849 {
   5850 #ifdef NOTYET
   5851 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5852 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5853 	uint32_t avg_size = 0;
   5854 	uint32_t new_itr;
   5855 
   5856 	if (rxq->rxq_packets)
    5857 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   5858 	if (txq->txq_packets)
   5859 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5860 
   5861 	if (avg_size == 0) {
   5862 		new_itr = 450; /* restore default value */
   5863 		goto out;
   5864 	}
   5865 
   5866 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5867 	avg_size += 24;
   5868 
   5869 	/* Don't starve jumbo frames */
   5870 	avg_size = uimin(avg_size, 3000);
   5871 
   5872 	/* Give a little boost to mid-size frames */
   5873 	if ((avg_size > 300) && (avg_size < 1200))
   5874 		new_itr = avg_size / 3;
   5875 	else
   5876 		new_itr = avg_size / 2;
   5877 
   5878 out:
   5879 	/*
    5880 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5881 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5882 	 */
   5883 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5884 		new_itr *= 4;
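
         	/*
         	 * Worked example of the heuristic above: an average frame of
         	 * 876 bytes becomes 900 after the 24-byte CRC/preamble/gap
         	 * adjustment, falls in the mid-size boost range and yields
         	 * new_itr = 900 / 3 = 300, scaled to 1200 by the preceding
         	 * check for the controllers it applies to.
         	 */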
   5885 
   5886 	if (new_itr != wmq->wmq_itr) {
   5887 		wmq->wmq_itr = new_itr;
   5888 		wmq->wmq_set_itr = true;
   5889 	} else
   5890 		wmq->wmq_set_itr = false;
   5891 
   5892 	rxq->rxq_packets = 0;
   5893 	rxq->rxq_bytes = 0;
   5894 	txq->txq_packets = 0;
   5895 	txq->txq_bytes = 0;
   5896 #endif
   5897 }
   5898 
   5899 static void
   5900 wm_init_sysctls(struct wm_softc *sc)
   5901 {
   5902 	struct sysctllog **log;
   5903 	const struct sysctlnode *rnode, *qnode, *cnode;
   5904 	int i, rv;
   5905 	const char *dvname;
   5906 
   5907 	log = &sc->sc_sysctllog;
   5908 	dvname = device_xname(sc->sc_dev);
   5909 
   5910 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5911 	    0, CTLTYPE_NODE, dvname,
   5912 	    SYSCTL_DESCR("wm information and settings"),
   5913 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5914 	if (rv != 0)
   5915 		goto err;
   5916 
   5917 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5918 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5919 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5920 	if (rv != 0)
   5921 		goto teardown;
   5922 
   5923 	for (i = 0; i < sc->sc_nqueues; i++) {
   5924 		struct wm_queue *wmq = &sc->sc_queue[i];
   5925 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5926 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5927 
   5928 		snprintf(sc->sc_queue[i].sysctlname,
   5929 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5930 
   5931 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5932 		    0, CTLTYPE_NODE,
   5933 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5934 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5935 			break;
   5936 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5937 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5938 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   5939 		    NULL, 0, &txq->txq_free,
   5940 		    0, CTL_CREATE, CTL_EOL) != 0)
   5941 			break;
   5942 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5943 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5944 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   5945 		    NULL, 0, &txq->txq_next,
   5946 		    0, CTL_CREATE, CTL_EOL) != 0)
   5947 			break;
   5948 
   5949 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5950 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5951 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   5952 		    NULL, 0, &rxq->rxq_ptr,
   5953 		    0, CTL_CREATE, CTL_EOL) != 0)
   5954 			break;
   5955 	}
   5956 
   5957 #ifdef WM_DEBUG
   5958 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5959 	    CTLTYPE_INT, "debug_flags",
   5960 	    SYSCTL_DESCR(
   5961 		    "Debug flags:\n"	\
   5962 		    "\t0x01 LINK\n"	\
   5963 		    "\t0x02 TX\n"	\
   5964 		    "\t0x04 RX\n"	\
   5965 		    "\t0x08 GMII\n"	\
   5966 		    "\t0x10 MANAGE\n"	\
   5967 		    "\t0x20 NVM\n"	\
   5968 		    "\t0x40 INIT\n"	\
   5969 		    "\t0x80 LOCK"),
   5970 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   5971 	if (rv != 0)
   5972 		goto teardown;
   5973 #endif
   5974 
   5975 	return;
   5976 
   5977 teardown:
   5978 	sysctl_teardown(log);
   5979 err:
   5980 	sc->sc_sysctllog = NULL;
   5981 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5982 	    __func__, rv);
   5983 }
   5984 
   5985 /*
   5986  * wm_init:		[ifnet interface function]
   5987  *
   5988  *	Initialize the interface.
   5989  */
   5990 static int
   5991 wm_init(struct ifnet *ifp)
   5992 {
   5993 	struct wm_softc *sc = ifp->if_softc;
   5994 	int ret;
   5995 
   5996 	WM_CORE_LOCK(sc);
   5997 	ret = wm_init_locked(ifp);
   5998 	WM_CORE_UNLOCK(sc);
   5999 
   6000 	return ret;
   6001 }
   6002 
   6003 static int
   6004 wm_init_locked(struct ifnet *ifp)
   6005 {
   6006 	struct wm_softc *sc = ifp->if_softc;
   6007 	struct ethercom *ec = &sc->sc_ethercom;
   6008 	int i, j, trynum, error = 0;
   6009 	uint32_t reg, sfp_mask = 0;
   6010 
   6011 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6012 		device_xname(sc->sc_dev), __func__));
   6013 	KASSERT(WM_CORE_LOCKED(sc));
   6014 
   6015 	/*
    6016 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6017 	 * There is a small but measurable benefit to avoiding the adjustment
   6018 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6019 	 * on such platforms.  One possibility is that the DMA itself is
   6020 	 * slightly more efficient if the front of the entire packet (instead
   6021 	 * of the front of the headers) is aligned.
   6022 	 *
   6023 	 * Note we must always set align_tweak to 0 if we are using
   6024 	 * jumbo frames.
   6025 	 */
   6026 #ifdef __NO_STRICT_ALIGNMENT
   6027 	sc->sc_align_tweak = 0;
   6028 #else
   6029 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6030 		sc->sc_align_tweak = 0;
   6031 	else
   6032 		sc->sc_align_tweak = 2;
   6033 #endif /* __NO_STRICT_ALIGNMENT */
   6034 
   6035 	/* Cancel any pending I/O. */
   6036 	wm_stop_locked(ifp, false, false);
   6037 
   6038 	/* Update statistics before reset */
   6039 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6040 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6041 
   6042 	/* PCH_SPT hardware workaround */
   6043 	if (sc->sc_type == WM_T_PCH_SPT)
   6044 		wm_flush_desc_rings(sc);
   6045 
   6046 	/* Reset the chip to a known state. */
   6047 	wm_reset(sc);
   6048 
   6049 	/*
    6050 	 * AMT-based hardware can now take control from firmware.
    6051 	 * Do this after reset.
   6052 	 */
   6053 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6054 		wm_get_hw_control(sc);
   6055 
   6056 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6057 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6058 		wm_legacy_irq_quirk_spt(sc);
   6059 
   6060 	/* Init hardware bits */
   6061 	wm_initialize_hardware_bits(sc);
   6062 
   6063 	/* Reset the PHY. */
   6064 	if (sc->sc_flags & WM_F_HAS_MII)
   6065 		wm_gmii_reset(sc);
   6066 
   6067 	if (sc->sc_type >= WM_T_ICH8) {
   6068 		reg = CSR_READ(sc, WMREG_GCR);
   6069 		/*
   6070 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6071 		 * default after reset.
   6072 		 */
   6073 		if (sc->sc_type == WM_T_ICH8)
   6074 			reg |= GCR_NO_SNOOP_ALL;
   6075 		else
   6076 			reg &= ~GCR_NO_SNOOP_ALL;
   6077 		CSR_WRITE(sc, WMREG_GCR, reg);
   6078 	}
   6079 
   6080 	if ((sc->sc_type >= WM_T_ICH8)
   6081 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6082 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6083 
   6084 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6085 		reg |= CTRL_EXT_RO_DIS;
   6086 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6087 	}
   6088 
   6089 	/* Calculate (E)ITR value */
   6090 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6091 		/*
   6092 		 * For NEWQUEUE's EITR (except for 82575).
    6093 		 * The 82575's EITR should be set to the same throttling value
    6094 		 * as the other old controllers' ITR because the interrupt/sec
    6095 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6096 		 * The 82574's EITR should be set to the same throttling value
    6097 		 * as the ITR.
    6098 		 *
    6099 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
    6100 		 * contrast to the ITR throttling value.
   6101 		 */
   6102 		sc->sc_itr_init = 450;
   6103 	} else if (sc->sc_type >= WM_T_82543) {
   6104 		/*
   6105 		 * Set up the interrupt throttling register (units of 256ns)
   6106 		 * Note that a footnote in Intel's documentation says this
   6107 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6108 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6109 		 * that that is also true for the 1024ns units of the other
   6110 		 * interrupt-related timer registers -- so, really, we ought
   6111 		 * to divide this value by 4 when the link speed is low.
   6112 		 *
   6113 		 * XXX implement this division at link speed change!
   6114 		 */
   6115 
   6116 		/*
   6117 		 * For N interrupts/sec, set this value to:
   6118 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6119 		 * absolute and packet timer values to this value
   6120 		 * divided by 4 to get "simple timer" behavior.
   6121 		 */
   6122 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
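         		/*
         		 * Sanity check of the arithmetic: 1,000,000,000 /
         		 * (1500 * 256) is about 2604, which matches the
         		 * ints/sec figure noted above.
         		 */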
   6123 	}
   6124 
   6125 	error = wm_init_txrx_queues(sc);
   6126 	if (error)
   6127 		goto out;
   6128 
   6129 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6130 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6131 	    (sc->sc_type >= WM_T_82575))
   6132 		wm_serdes_power_up_link_82575(sc);
   6133 
   6134 	/* Clear out the VLAN table -- we don't use it (yet). */
   6135 	CSR_WRITE(sc, WMREG_VET, 0);
   6136 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6137 		trynum = 10; /* Due to hw errata */
   6138 	else
   6139 		trynum = 1;
   6140 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6141 		for (j = 0; j < trynum; j++)
   6142 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6143 
   6144 	/*
   6145 	 * Set up flow-control parameters.
   6146 	 *
   6147 	 * XXX Values could probably stand some tuning.
   6148 	 */
   6149 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6150 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6151 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6152 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6153 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6154 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6155 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6156 	}
   6157 
   6158 	sc->sc_fcrtl = FCRTL_DFLT;
   6159 	if (sc->sc_type < WM_T_82543) {
   6160 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6161 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6162 	} else {
   6163 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6164 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6165 	}
   6166 
   6167 	if (sc->sc_type == WM_T_80003)
   6168 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6169 	else
   6170 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6171 
   6172 	/* Writes the control register. */
   6173 	wm_set_vlan(sc);
   6174 
   6175 	if (sc->sc_flags & WM_F_HAS_MII) {
   6176 		uint16_t kmreg;
   6177 
   6178 		switch (sc->sc_type) {
   6179 		case WM_T_80003:
   6180 		case WM_T_ICH8:
   6181 		case WM_T_ICH9:
   6182 		case WM_T_ICH10:
   6183 		case WM_T_PCH:
   6184 		case WM_T_PCH2:
   6185 		case WM_T_PCH_LPT:
   6186 		case WM_T_PCH_SPT:
   6187 		case WM_T_PCH_CNP:
   6188 			/*
    6189 			 * Set the MAC to wait the maximum time between each
    6190 			 * iteration and increase the max iterations when
    6191 			 * polling the PHY; this fixes erroneous timeouts at
    6192 			 * 10Mbps.
   6193 			 */
   6194 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6195 			    0xFFFF);
   6196 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6197 			    &kmreg);
   6198 			kmreg |= 0x3F;
   6199 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6200 			    kmreg);
   6201 			break;
   6202 		default:
   6203 			break;
   6204 		}
   6205 
   6206 		if (sc->sc_type == WM_T_80003) {
   6207 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6208 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6209 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6210 
    6211 			/* Bypass RX and TX FIFOs */
   6212 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6213 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6214 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6215 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6216 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6217 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6218 		}
   6219 	}
   6220 #if 0
   6221 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6222 #endif
   6223 
   6224 	/* Set up checksum offload parameters. */
   6225 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6226 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6227 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6228 		reg |= RXCSUM_IPOFL;
   6229 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6230 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6231 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6232 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6233 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6234 
   6235 	/* Set registers about MSI-X */
   6236 	if (wm_is_using_msix(sc)) {
   6237 		uint32_t ivar, qintr_idx;
   6238 		struct wm_queue *wmq;
   6239 		unsigned int qid;
   6240 
   6241 		if (sc->sc_type == WM_T_82575) {
   6242 			/* Interrupt control */
   6243 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6244 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6245 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6246 
   6247 			/* TX and RX */
   6248 			for (i = 0; i < sc->sc_nqueues; i++) {
   6249 				wmq = &sc->sc_queue[i];
   6250 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6251 				    EITR_TX_QUEUE(wmq->wmq_id)
   6252 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6253 			}
   6254 			/* Link status */
   6255 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6256 			    EITR_OTHER);
   6257 		} else if (sc->sc_type == WM_T_82574) {
   6258 			/* Interrupt control */
   6259 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6260 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6261 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6262 
   6263 			/*
    6264 			 * Work around an issue with spurious interrupts in
    6265 			 * MSI-X mode. At wm_initialize_hardware_bits(),
    6266 			 * sc_nintrs has not been initialized yet, so
    6267 			 * re-initialize WMREG_RFCTL here.
   6268 			 */
   6269 			reg = CSR_READ(sc, WMREG_RFCTL);
   6270 			reg |= WMREG_RFCTL_ACKDIS;
   6271 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6272 
   6273 			ivar = 0;
   6274 			/* TX and RX */
   6275 			for (i = 0; i < sc->sc_nqueues; i++) {
   6276 				wmq = &sc->sc_queue[i];
   6277 				qid = wmq->wmq_id;
   6278 				qintr_idx = wmq->wmq_intr_idx;
   6279 
   6280 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6281 				    IVAR_TX_MASK_Q_82574(qid));
   6282 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6283 				    IVAR_RX_MASK_Q_82574(qid));
   6284 			}
   6285 			/* Link status */
   6286 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6287 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6288 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6289 		} else {
   6290 			/* Interrupt control */
   6291 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6292 			    | GPIE_EIAME | GPIE_PBA);
   6293 
   6294 			switch (sc->sc_type) {
   6295 			case WM_T_82580:
   6296 			case WM_T_I350:
   6297 			case WM_T_I354:
   6298 			case WM_T_I210:
   6299 			case WM_T_I211:
   6300 				/* TX and RX */
   6301 				for (i = 0; i < sc->sc_nqueues; i++) {
   6302 					wmq = &sc->sc_queue[i];
   6303 					qid = wmq->wmq_id;
   6304 					qintr_idx = wmq->wmq_intr_idx;
   6305 
   6306 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6307 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6308 					ivar |= __SHIFTIN((qintr_idx
   6309 						| IVAR_VALID),
   6310 					    IVAR_TX_MASK_Q(qid));
   6311 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6312 					ivar |= __SHIFTIN((qintr_idx
   6313 						| IVAR_VALID),
   6314 					    IVAR_RX_MASK_Q(qid));
   6315 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6316 				}
   6317 				break;
   6318 			case WM_T_82576:
   6319 				/* TX and RX */
   6320 				for (i = 0; i < sc->sc_nqueues; i++) {
   6321 					wmq = &sc->sc_queue[i];
   6322 					qid = wmq->wmq_id;
   6323 					qintr_idx = wmq->wmq_intr_idx;
   6324 
   6325 					ivar = CSR_READ(sc,
   6326 					    WMREG_IVAR_Q_82576(qid));
   6327 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6328 					ivar |= __SHIFTIN((qintr_idx
   6329 						| IVAR_VALID),
   6330 					    IVAR_TX_MASK_Q_82576(qid));
   6331 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6332 					ivar |= __SHIFTIN((qintr_idx
   6333 						| IVAR_VALID),
   6334 					    IVAR_RX_MASK_Q_82576(qid));
   6335 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6336 					    ivar);
   6337 				}
   6338 				break;
   6339 			default:
   6340 				break;
   6341 			}
   6342 
   6343 			/* Link status */
   6344 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6345 			    IVAR_MISC_OTHER);
   6346 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6347 		}
   6348 
   6349 		if (wm_is_using_multiqueue(sc)) {
   6350 			wm_init_rss(sc);
   6351 
    6352 			/*
    6353 			 * NOTE: Receive Full-Packet Checksum Offload
    6354 			 * is mutually exclusive with Multiqueue. However,
    6355 			 * this is not the same as TCP/IP checksums, which
    6356 			 * still work.
    6357 			 */
   6358 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6359 			reg |= RXCSUM_PCSD;
   6360 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6361 		}
   6362 	}
   6363 
   6364 	/* Set up the interrupt registers. */
   6365 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6366 
   6367 	/* Enable SFP module insertion interrupt if it's required */
   6368 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6369 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6370 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6371 		sfp_mask = ICR_GPI(0);
   6372 	}
   6373 
   6374 	if (wm_is_using_msix(sc)) {
   6375 		uint32_t mask;
   6376 		struct wm_queue *wmq;
   6377 
   6378 		switch (sc->sc_type) {
   6379 		case WM_T_82574:
   6380 			mask = 0;
   6381 			for (i = 0; i < sc->sc_nqueues; i++) {
   6382 				wmq = &sc->sc_queue[i];
   6383 				mask |= ICR_TXQ(wmq->wmq_id);
   6384 				mask |= ICR_RXQ(wmq->wmq_id);
   6385 			}
   6386 			mask |= ICR_OTHER;
   6387 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6388 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6389 			break;
   6390 		default:
   6391 			if (sc->sc_type == WM_T_82575) {
   6392 				mask = 0;
   6393 				for (i = 0; i < sc->sc_nqueues; i++) {
   6394 					wmq = &sc->sc_queue[i];
   6395 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6396 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6397 				}
   6398 				mask |= EITR_OTHER;
   6399 			} else {
   6400 				mask = 0;
   6401 				for (i = 0; i < sc->sc_nqueues; i++) {
   6402 					wmq = &sc->sc_queue[i];
   6403 					mask |= 1 << wmq->wmq_intr_idx;
   6404 				}
   6405 				mask |= 1 << sc->sc_link_intr_idx;
   6406 			}
   6407 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6408 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6409 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6410 
   6411 			/* For other interrupts */
   6412 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6413 			break;
   6414 		}
   6415 	} else {
   6416 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6417 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6418 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6419 	}
   6420 
   6421 	/* Set up the inter-packet gap. */
   6422 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6423 
   6424 	if (sc->sc_type >= WM_T_82543) {
   6425 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6426 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6427 			wm_itrs_writereg(sc, wmq);
   6428 		}
    6429 		/*
    6430 		 * Link interrupts occur much less frequently than TX
    6431 		 * and RX interrupts, so we don't tune the
    6432 		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's
    6433 		 * if_igb does.
    6434 		 */
   6435 	}
   6436 
   6437 	/* Set the VLAN ethernetype. */
   6438 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6439 
   6440 	/*
   6441 	 * Set up the transmit control register; we start out with
    6442 	 * a collision distance suitable for FDX, but update it when
   6443 	 * we resolve the media type.
   6444 	 */
   6445 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6446 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6447 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6448 	if (sc->sc_type >= WM_T_82571)
   6449 		sc->sc_tctl |= TCTL_MULR;
   6450 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6451 
   6452 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6453 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6454 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6455 	}
   6456 
   6457 	if (sc->sc_type == WM_T_80003) {
   6458 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6459 		reg &= ~TCTL_EXT_GCEX_MASK;
   6460 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6461 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6462 	}
   6463 
   6464 	/* Set the media. */
   6465 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6466 		goto out;
   6467 
   6468 	/* Configure for OS presence */
   6469 	wm_init_manageability(sc);
   6470 
   6471 	/*
   6472 	 * Set up the receive control register; we actually program the
   6473 	 * register when we set the receive filter. Use multicast address
   6474 	 * offset type 0.
   6475 	 *
   6476 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6477 	 * don't enable that feature.
   6478 	 */
   6479 	sc->sc_mchash_type = 0;
   6480 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6481 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6482 
    6483 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6484 	if (sc->sc_type == WM_T_82574)
   6485 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6486 
   6487 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6488 		sc->sc_rctl |= RCTL_SECRC;
   6489 
   6490 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6491 	    && (ifp->if_mtu > ETHERMTU)) {
   6492 		sc->sc_rctl |= RCTL_LPE;
   6493 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6494 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6495 	}
   6496 
   6497 	if (MCLBYTES == 2048)
   6498 		sc->sc_rctl |= RCTL_2k;
   6499 	else {
   6500 		if (sc->sc_type >= WM_T_82543) {
   6501 			switch (MCLBYTES) {
   6502 			case 4096:
   6503 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6504 				break;
   6505 			case 8192:
   6506 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6507 				break;
   6508 			case 16384:
   6509 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6510 				break;
   6511 			default:
   6512 				panic("wm_init: MCLBYTES %d unsupported",
   6513 				    MCLBYTES);
   6514 				break;
   6515 			}
   6516 		} else
   6517 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6518 	}
   6519 
   6520 	/* Enable ECC */
   6521 	switch (sc->sc_type) {
   6522 	case WM_T_82571:
   6523 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6524 		reg |= PBA_ECC_CORR_EN;
   6525 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6526 		break;
   6527 	case WM_T_PCH_LPT:
   6528 	case WM_T_PCH_SPT:
   6529 	case WM_T_PCH_CNP:
   6530 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6531 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6532 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6533 
   6534 		sc->sc_ctrl |= CTRL_MEHE;
   6535 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6536 		break;
   6537 	default:
   6538 		break;
   6539 	}
   6540 
   6541 	/*
   6542 	 * Set the receive filter.
   6543 	 *
   6544 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6545 	 * the setting of RCTL.EN in wm_set_filter()
   6546 	 */
   6547 	wm_set_filter(sc);
   6548 
    6549 	/* On 82575 and later, set RDT only if RX is enabled */
   6550 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6551 		int qidx;
   6552 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6553 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6554 			for (i = 0; i < WM_NRXDESC; i++) {
   6555 				mutex_enter(rxq->rxq_lock);
   6556 				wm_init_rxdesc(rxq, i);
   6557 				mutex_exit(rxq->rxq_lock);
   6559 			}
   6560 		}
   6561 	}
   6562 
   6563 	wm_unset_stopping_flags(sc);
   6564 
   6565 	/* Start the one second link check clock. */
   6566 	callout_schedule(&sc->sc_tick_ch, hz);
   6567 
   6568 	/* ...all done! */
   6569 	ifp->if_flags |= IFF_RUNNING;
   6570 
   6571  out:
   6572 	/* Save last flags for the callback */
   6573 	sc->sc_if_flags = ifp->if_flags;
   6574 	sc->sc_ec_capenable = ec->ec_capenable;
   6575 	if (error)
   6576 		log(LOG_ERR, "%s: interface not running\n",
   6577 		    device_xname(sc->sc_dev));
   6578 	return error;
   6579 }
   6580 
   6581 /*
   6582  * wm_stop:		[ifnet interface function]
   6583  *
   6584  *	Stop transmission on the interface.
   6585  */
   6586 static void
   6587 wm_stop(struct ifnet *ifp, int disable)
   6588 {
   6589 	struct wm_softc *sc = ifp->if_softc;
   6590 
   6591 	ASSERT_SLEEPABLE();
   6592 
   6593 	WM_CORE_LOCK(sc);
    6594 	wm_stop_locked(ifp, disable != 0, true);
   6595 	WM_CORE_UNLOCK(sc);
   6596 
   6597 	/*
    6598 	 * After wm_set_stopping_flags(), it is guaranteed that
    6599 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6600 	 * However, workqueue_wait() cannot be called in
    6601 	 * wm_stop_locked() because it can sleep, so call
    6602 	 * workqueue_wait() here.
   6603 	 */
   6604 	for (int i = 0; i < sc->sc_nqueues; i++)
   6605 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6606 }
   6607 
   6608 static void
   6609 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6610 {
   6611 	struct wm_softc *sc = ifp->if_softc;
   6612 	struct wm_txsoft *txs;
   6613 	int i, qidx;
   6614 
   6615 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6616 		device_xname(sc->sc_dev), __func__));
   6617 	KASSERT(WM_CORE_LOCKED(sc));
   6618 
   6619 	wm_set_stopping_flags(sc);
   6620 
   6621 	if (sc->sc_flags & WM_F_HAS_MII) {
   6622 		/* Down the MII. */
   6623 		mii_down(&sc->sc_mii);
   6624 	} else {
   6625 #if 0
   6626 		/* Should we clear PHY's status properly? */
   6627 		wm_reset(sc);
   6628 #endif
   6629 	}
   6630 
   6631 	/* Stop the transmit and receive processes. */
   6632 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6633 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6634 	sc->sc_rctl &= ~RCTL_EN;
   6635 
   6636 	/*
   6637 	 * Clear the interrupt mask to ensure the device cannot assert its
   6638 	 * interrupt line.
   6639 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6640 	 * service any currently pending or shared interrupt.
   6641 	 */
   6642 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6643 	sc->sc_icr = 0;
   6644 	if (wm_is_using_msix(sc)) {
   6645 		if (sc->sc_type != WM_T_82574) {
   6646 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6647 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6648 		} else
   6649 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6650 	}
   6651 
   6652 	/*
   6653 	 * Stop callouts after interrupts are disabled; if we have
   6654 	 * to wait for them, we will be releasing the CORE_LOCK
   6655 	 * briefly, which will unblock interrupts on the current CPU.
   6656 	 */
   6657 
   6658 	/* Stop the one second clock. */
   6659 	if (wait)
   6660 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6661 	else
   6662 		callout_stop(&sc->sc_tick_ch);
   6663 
   6664 	/* Stop the 82547 Tx FIFO stall check timer. */
   6665 	if (sc->sc_type == WM_T_82547) {
   6666 		if (wait)
   6667 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6668 		else
   6669 			callout_stop(&sc->sc_txfifo_ch);
   6670 	}
   6671 
   6672 	/* Release any queued transmit buffers. */
   6673 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6674 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6675 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6676 		struct mbuf *m;
   6677 
   6678 		mutex_enter(txq->txq_lock);
   6679 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6680 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6681 			txs = &txq->txq_soft[i];
   6682 			if (txs->txs_mbuf != NULL) {
    6683 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6684 				m_freem(txs->txs_mbuf);
   6685 				txs->txs_mbuf = NULL;
   6686 			}
   6687 		}
   6688 		/* Drain txq_interq */
   6689 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6690 			m_freem(m);
   6691 		mutex_exit(txq->txq_lock);
   6692 	}
   6693 
   6694 	/* Mark the interface as down and cancel the watchdog timer. */
   6695 	ifp->if_flags &= ~IFF_RUNNING;
   6696 
   6697 	if (disable) {
   6698 		for (i = 0; i < sc->sc_nqueues; i++) {
   6699 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6700 			mutex_enter(rxq->rxq_lock);
   6701 			wm_rxdrain(rxq);
   6702 			mutex_exit(rxq->rxq_lock);
   6703 		}
   6704 	}
   6705 
   6706 #if 0 /* notyet */
   6707 	if (sc->sc_type >= WM_T_82544)
   6708 		CSR_WRITE(sc, WMREG_WUC, 0);
   6709 #endif
   6710 }
   6711 
   6712 static void
   6713 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6714 {
   6715 	struct mbuf *m;
   6716 	int i;
   6717 
   6718 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6719 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6720 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6721 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6722 		    m->m_data, m->m_len, m->m_flags);
   6723 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6724 	    i, i == 1 ? "" : "s");
   6725 }
   6726 
   6727 /*
   6728  * wm_82547_txfifo_stall:
   6729  *
   6730  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6731  *	reset the FIFO pointers, and restart packet transmission.
   6732  */
   6733 static void
   6734 wm_82547_txfifo_stall(void *arg)
   6735 {
   6736 	struct wm_softc *sc = arg;
   6737 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6738 
   6739 	mutex_enter(txq->txq_lock);
   6740 
   6741 	if (txq->txq_stopping)
   6742 		goto out;
   6743 
   6744 	if (txq->txq_fifo_stall) {
   6745 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6746 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6747 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6748 			/*
   6749 			 * Packets have drained.  Stop transmitter, reset
   6750 			 * FIFO pointers, restart transmitter, and kick
   6751 			 * the packet queue.
   6752 			 */
   6753 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6754 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6755 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6756 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6757 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6758 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6759 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6760 			CSR_WRITE_FLUSH(sc);
   6761 
   6762 			txq->txq_fifo_head = 0;
   6763 			txq->txq_fifo_stall = 0;
   6764 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6765 		} else {
   6766 			/*
   6767 			 * Still waiting for packets to drain; try again in
   6768 			 * another tick.
   6769 			 */
   6770 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6771 		}
   6772 	}
   6773 
   6774 out:
   6775 	mutex_exit(txq->txq_lock);
   6776 }
   6777 
   6778 /*
   6779  * wm_82547_txfifo_bugchk:
   6780  *
   6781  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6782  *	prevent enqueueing a packet that would wrap around the end
    6783  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6784  *
   6785  *	We do this by checking the amount of space before the end
   6786  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6787  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6788  *	the internal FIFO pointers to the beginning, and restart
   6789  *	transmission on the interface.
   6790  */
   6791 #define	WM_FIFO_HDR		0x10
   6792 #define	WM_82547_PAD_LEN	0x3e0
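         /*
          * Worked example (values hypothetical): assuming txq_fifo_size =
          * 0x2000 and txq_fifo_head = 0x1f00, space = 0x100.  A 1514-byte
          * frame rounds up to len = roundup(1514 + 0x10, 0x10) = 0x600;
          * since 0x600 >= WM_82547_PAD_LEN + 0x100, the packet is held and
          * the FIFO-stall callout runs until the FIFO drains.
          */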
   6793 static int
   6794 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6795 {
   6796 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6797 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6798 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6799 
   6800 	/* Just return if already stalled. */
   6801 	if (txq->txq_fifo_stall)
   6802 		return 1;
   6803 
   6804 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6805 		/* Stall only occurs in half-duplex mode. */
   6806 		goto send_packet;
   6807 	}
   6808 
   6809 	if (len >= WM_82547_PAD_LEN + space) {
   6810 		txq->txq_fifo_stall = 1;
   6811 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6812 		return 1;
   6813 	}
   6814 
   6815  send_packet:
   6816 	txq->txq_fifo_head += len;
   6817 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6818 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6819 
   6820 	return 0;
   6821 }
   6822 
   6823 static int
   6824 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6825 {
   6826 	int error;
   6827 
   6828 	/*
   6829 	 * Allocate the control data structures, and create and load the
   6830 	 * DMA map for it.
   6831 	 *
   6832 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6833 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6834 	 * both sets within the same 4G segment.
   6835 	 */
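         	/*
         	 * The 4G constraint is enforced below by passing 0x100000000
         	 * as the boundary argument to bus_dmamem_alloc().
         	 */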
   6836 	if (sc->sc_type < WM_T_82544)
   6837 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6838 	else
   6839 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6840 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6841 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6842 	else
   6843 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6844 
   6845 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6846 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6847 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6848 		aprint_error_dev(sc->sc_dev,
   6849 		    "unable to allocate TX control data, error = %d\n",
   6850 		    error);
   6851 		goto fail_0;
   6852 	}
   6853 
   6854 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6855 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6856 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6857 		aprint_error_dev(sc->sc_dev,
   6858 		    "unable to map TX control data, error = %d\n", error);
   6859 		goto fail_1;
   6860 	}
   6861 
   6862 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6863 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6864 		aprint_error_dev(sc->sc_dev,
   6865 		    "unable to create TX control data DMA map, error = %d\n",
   6866 		    error);
   6867 		goto fail_2;
   6868 	}
   6869 
   6870 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6871 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6872 		aprint_error_dev(sc->sc_dev,
   6873 		    "unable to load TX control data DMA map, error = %d\n",
   6874 		    error);
   6875 		goto fail_3;
   6876 	}
   6877 
   6878 	return 0;
   6879 
   6880  fail_3:
   6881 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6882  fail_2:
   6883 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6884 	    WM_TXDESCS_SIZE(txq));
   6885  fail_1:
   6886 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6887  fail_0:
   6888 	return error;
   6889 }
   6890 
   6891 static void
   6892 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6893 {
   6894 
   6895 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6896 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6897 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6898 	    WM_TXDESCS_SIZE(txq));
   6899 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6900 }
   6901 
   6902 static int
   6903 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6904 {
   6905 	int error;
   6906 	size_t rxq_descs_size;
   6907 
   6908 	/*
   6909 	 * Allocate the control data structures, and create and load the
   6910 	 * DMA map for it.
   6911 	 *
   6912 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6913 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6914 	 * both sets within the same 4G segment.
   6915 	 */
   6916 	rxq->rxq_ndesc = WM_NRXDESC;
   6917 	if (sc->sc_type == WM_T_82574)
   6918 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6919 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6920 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6921 	else
   6922 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6923 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6924 
   6925 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6926 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6927 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6928 		aprint_error_dev(sc->sc_dev,
   6929 		    "unable to allocate RX control data, error = %d\n",
   6930 		    error);
   6931 		goto fail_0;
   6932 	}
   6933 
   6934 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6935 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6936 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6937 		aprint_error_dev(sc->sc_dev,
   6938 		    "unable to map RX control data, error = %d\n", error);
   6939 		goto fail_1;
   6940 	}
   6941 
   6942 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6943 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6944 		aprint_error_dev(sc->sc_dev,
   6945 		    "unable to create RX control data DMA map, error = %d\n",
   6946 		    error);
   6947 		goto fail_2;
   6948 	}
   6949 
   6950 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6951 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6952 		aprint_error_dev(sc->sc_dev,
   6953 		    "unable to load RX control data DMA map, error = %d\n",
   6954 		    error);
   6955 		goto fail_3;
   6956 	}
   6957 
   6958 	return 0;
   6959 
   6960  fail_3:
   6961 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6962  fail_2:
   6963 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6964 	    rxq_descs_size);
   6965  fail_1:
   6966 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6967  fail_0:
   6968 	return error;
   6969 }
   6970 
   6971 static void
   6972 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6973 {
   6974 
   6975 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6976 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6977 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6978 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6979 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6980 }
    6981 
   6983 static int
   6984 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6985 {
   6986 	int i, error;
   6987 
   6988 	/* Create the transmit buffer DMA maps. */
   6989 	WM_TXQUEUELEN(txq) =
   6990 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6991 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6992 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6993 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6994 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6995 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6996 			aprint_error_dev(sc->sc_dev,
   6997 			    "unable to create Tx DMA map %d, error = %d\n",
   6998 			    i, error);
   6999 			goto fail;
   7000 		}
   7001 	}
   7002 
   7003 	return 0;
   7004 
   7005  fail:
   7006 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7007 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7008 			bus_dmamap_destroy(sc->sc_dmat,
   7009 			    txq->txq_soft[i].txs_dmamap);
   7010 	}
   7011 	return error;
   7012 }
   7013 
   7014 static void
   7015 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7016 {
   7017 	int i;
   7018 
   7019 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7020 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7021 			bus_dmamap_destroy(sc->sc_dmat,
   7022 			    txq->txq_soft[i].txs_dmamap);
   7023 	}
   7024 }
   7025 
   7026 static int
   7027 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7028 {
   7029 	int i, error;
   7030 
   7031 	/* Create the receive buffer DMA maps. */
   7032 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7033 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7034 			    MCLBYTES, 0, 0,
   7035 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7036 			aprint_error_dev(sc->sc_dev,
    7037 			    "unable to create Rx DMA map %d, error = %d\n",
   7038 			    i, error);
   7039 			goto fail;
   7040 		}
   7041 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7042 	}
   7043 
   7044 	return 0;
   7045 
   7046  fail:
   7047 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7048 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7049 			bus_dmamap_destroy(sc->sc_dmat,
   7050 			    rxq->rxq_soft[i].rxs_dmamap);
   7051 	}
   7052 	return error;
   7053 }
   7054 
   7055 static void
   7056 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7057 {
   7058 	int i;
   7059 
   7060 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7061 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7062 			bus_dmamap_destroy(sc->sc_dmat,
   7063 			    rxq->rxq_soft[i].rxs_dmamap);
   7064 	}
   7065 }
   7066 
   7067 /*
    7068  * wm_alloc_txrx_queues:
   7069  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7070  */
   7071 static int
   7072 wm_alloc_txrx_queues(struct wm_softc *sc)
   7073 {
   7074 	int i, error, tx_done, rx_done;
   7075 
   7076 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7077 	    KM_SLEEP);
   7078 	if (sc->sc_queue == NULL) {
    7079 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7080 		error = ENOMEM;
   7081 		goto fail_0;
   7082 	}
   7083 
   7084 	/* For transmission */
   7085 	error = 0;
   7086 	tx_done = 0;
   7087 	for (i = 0; i < sc->sc_nqueues; i++) {
   7088 #ifdef WM_EVENT_COUNTERS
   7089 		int j;
   7090 		const char *xname;
   7091 #endif
   7092 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7093 		txq->txq_sc = sc;
   7094 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7095 
   7096 		error = wm_alloc_tx_descs(sc, txq);
   7097 		if (error)
   7098 			break;
   7099 		error = wm_alloc_tx_buffer(sc, txq);
   7100 		if (error) {
   7101 			wm_free_tx_descs(sc, txq);
   7102 			break;
   7103 		}
   7104 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7105 		if (txq->txq_interq == NULL) {
   7106 			wm_free_tx_descs(sc, txq);
   7107 			wm_free_tx_buffer(sc, txq);
   7108 			error = ENOMEM;
   7109 			break;
   7110 		}
   7111 
   7112 #ifdef WM_EVENT_COUNTERS
   7113 		xname = device_xname(sc->sc_dev);
   7114 
   7115 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7116 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7117 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7118 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7119 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7120 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7121 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7122 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7123 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7124 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7125 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7126 
   7127 		for (j = 0; j < WM_NTXSEGS; j++) {
    7128 			snprintf(txq->txq_txseg_evcnt_names[j],
    7129 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
    7130 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
    7131 			    EVCNT_TYPE_MISC, NULL, xname,
         			    txq->txq_txseg_evcnt_names[j]);
   7132 		}
   7133 
   7134 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7135 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7136 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7137 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7138 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7139 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7140 #endif /* WM_EVENT_COUNTERS */
   7141 
   7142 		tx_done++;
   7143 	}
   7144 	if (error)
   7145 		goto fail_1;
   7146 
   7147 	/* For receive */
   7148 	error = 0;
   7149 	rx_done = 0;
   7150 	for (i = 0; i < sc->sc_nqueues; i++) {
   7151 #ifdef WM_EVENT_COUNTERS
   7152 		const char *xname;
   7153 #endif
   7154 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7155 		rxq->rxq_sc = sc;
   7156 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7157 
   7158 		error = wm_alloc_rx_descs(sc, rxq);
   7159 		if (error)
   7160 			break;
   7161 
   7162 		error = wm_alloc_rx_buffer(sc, rxq);
   7163 		if (error) {
   7164 			wm_free_rx_descs(sc, rxq);
   7165 			break;
   7166 		}
   7167 
   7168 #ifdef WM_EVENT_COUNTERS
   7169 		xname = device_xname(sc->sc_dev);
   7170 
   7171 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7172 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7173 
   7174 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7175 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7176 #endif /* WM_EVENT_COUNTERS */
   7177 
   7178 		rx_done++;
   7179 	}
   7180 	if (error)
   7181 		goto fail_2;
   7182 
   7183 	return 0;
   7184 
   7185  fail_2:
   7186 	for (i = 0; i < rx_done; i++) {
   7187 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7188 		wm_free_rx_buffer(sc, rxq);
   7189 		wm_free_rx_descs(sc, rxq);
   7190 		if (rxq->rxq_lock)
   7191 			mutex_obj_free(rxq->rxq_lock);
   7192 	}
   7193  fail_1:
   7194 	for (i = 0; i < tx_done; i++) {
   7195 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7196 		pcq_destroy(txq->txq_interq);
   7197 		wm_free_tx_buffer(sc, txq);
   7198 		wm_free_tx_descs(sc, txq);
   7199 		if (txq->txq_lock)
   7200 			mutex_obj_free(txq->txq_lock);
   7201 	}
   7202 
    7203 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7205  fail_0:
   7206 	return error;
   7207 }
   7208 
   7209 /*
    7210  * wm_free_txrx_queues:
   7211  *	Free {tx,rx}descs and {tx,rx} buffers
   7212  */
   7213 static void
   7214 wm_free_txrx_queues(struct wm_softc *sc)
   7215 {
   7216 	int i;
   7217 
   7218 	for (i = 0; i < sc->sc_nqueues; i++) {
   7219 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7220 
   7221 #ifdef WM_EVENT_COUNTERS
   7222 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7223 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7224 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7225 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7226 #endif /* WM_EVENT_COUNTERS */
   7227 
   7228 		wm_free_rx_buffer(sc, rxq);
   7229 		wm_free_rx_descs(sc, rxq);
   7230 		if (rxq->rxq_lock)
   7231 			mutex_obj_free(rxq->rxq_lock);
   7232 	}
   7233 
   7234 	for (i = 0; i < sc->sc_nqueues; i++) {
   7235 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7236 		struct mbuf *m;
   7237 #ifdef WM_EVENT_COUNTERS
   7238 		int j;
   7239 
   7240 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7241 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7242 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7243 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7244 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7245 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7246 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7247 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7248 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7249 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7250 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7251 
   7252 		for (j = 0; j < WM_NTXSEGS; j++)
   7253 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7254 
   7255 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7256 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7257 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7258 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7259 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7260 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7261 #endif /* WM_EVENT_COUNTERS */
   7262 
   7263 		/* Drain txq_interq */
   7264 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7265 			m_freem(m);
   7266 		pcq_destroy(txq->txq_interq);
   7267 
   7268 		wm_free_tx_buffer(sc, txq);
   7269 		wm_free_tx_descs(sc, txq);
   7270 		if (txq->txq_lock)
   7271 			mutex_obj_free(txq->txq_lock);
   7272 	}
   7273 
   7274 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7275 }
   7276 
   7277 static void
   7278 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7279 {
   7280 
   7281 	KASSERT(mutex_owned(txq->txq_lock));
   7282 
   7283 	/* Initialize the transmit descriptor ring. */
   7284 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7285 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7286 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7287 	txq->txq_free = WM_NTXDESC(txq);
   7288 	txq->txq_next = 0;
   7289 }
   7290 
   7291 static void
   7292 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7293     struct wm_txqueue *txq)
   7294 {
   7295 
   7296 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7297 		device_xname(sc->sc_dev), __func__));
   7298 	KASSERT(mutex_owned(txq->txq_lock));
   7299 
   7300 	if (sc->sc_type < WM_T_82543) {
   7301 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7302 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7303 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7304 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7305 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7306 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7307 	} else {
   7308 		int qid = wmq->wmq_id;
   7309 
   7310 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7311 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7312 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7313 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7314 
   7315 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7316 			/*
   7317 			 * Don't write TDT before TCTL.EN is set.
    7318 			 * See the documentation.
   7319 			 */
   7320 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7321 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7322 			    | TXDCTL_WTHRESH(0));
   7323 		else {
   7324 			/* XXX should update with AIM? */
   7325 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7326 			if (sc->sc_type >= WM_T_82540) {
   7327 				/* Should be the same */
   7328 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7329 			}
   7330 
   7331 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7332 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7333 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7334 		}
   7335 	}
   7336 }
   7337 
   7338 static void
   7339 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7340 {
   7341 	int i;
   7342 
   7343 	KASSERT(mutex_owned(txq->txq_lock));
   7344 
   7345 	/* Initialize the transmit job descriptors. */
   7346 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7347 		txq->txq_soft[i].txs_mbuf = NULL;
   7348 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7349 	txq->txq_snext = 0;
   7350 	txq->txq_sdirty = 0;
   7351 }
   7352 
   7353 static void
   7354 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7355     struct wm_txqueue *txq)
   7356 {
   7357 
   7358 	KASSERT(mutex_owned(txq->txq_lock));
   7359 
   7360 	/*
   7361 	 * Set up some register offsets that are different between
   7362 	 * the i82542 and the i82543 and later chips.
   7363 	 */
   7364 	if (sc->sc_type < WM_T_82543)
   7365 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7366 	else
   7367 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7368 
   7369 	wm_init_tx_descs(sc, txq);
   7370 	wm_init_tx_regs(sc, wmq, txq);
   7371 	wm_init_tx_buffer(sc, txq);
   7372 
   7373 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7374 	txq->txq_sending = false;
   7375 }
   7376 
   7377 static void
   7378 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7379     struct wm_rxqueue *rxq)
   7380 {
   7381 
   7382 	KASSERT(mutex_owned(rxq->rxq_lock));
   7383 
   7384 	/*
   7385 	 * Initialize the receive descriptor and receive job
   7386 	 * descriptor rings.
   7387 	 */
   7388 	if (sc->sc_type < WM_T_82543) {
   7389 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7390 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7391 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7392 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7393 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7394 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7395 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7396 
   7397 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7398 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7399 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7400 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7401 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7402 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7403 	} else {
   7404 		int qid = wmq->wmq_id;
   7405 
   7406 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7407 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7408 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7409 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7410 
   7411 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7412 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7413 				panic("%s: MCLBYTES %d unsupported for "
         				    "82575 or higher\n", __func__, MCLBYTES);
    7414 
    7415 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
    7416 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
         			    SRRCTL_DESCTYPE_ADV_ONEBUF
    7417 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
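         			/*
         			 * E.g. assuming SRRCTL_BSIZEPKT_SHIFT is 10
         			 * (1 KB units), MCLBYTES = 2048 programs a
         			 * 2 KB packet buffer size in SRRCTL above.
         			 */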
   7418 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7419 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7420 			    | RXDCTL_WTHRESH(1));
   7421 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7422 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7423 		} else {
   7424 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7425 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7426 			/* XXX should update with AIM? */
   7427 			CSR_WRITE(sc, WMREG_RDTR,
   7428 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7429 			/* MUST be same */
   7430 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7431 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7432 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7433 		}
   7434 	}
   7435 }
   7436 
   7437 static int
   7438 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7439 {
   7440 	struct wm_rxsoft *rxs;
   7441 	int error, i;
   7442 
   7443 	KASSERT(mutex_owned(rxq->rxq_lock));
   7444 
   7445 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7446 		rxs = &rxq->rxq_soft[i];
   7447 		if (rxs->rxs_mbuf == NULL) {
   7448 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7449 				log(LOG_ERR, "%s: unable to allocate or map "
   7450 				    "rx buffer %d, error = %d\n",
   7451 				    device_xname(sc->sc_dev), i, error);
   7452 				/*
   7453 				 * XXX Should attempt to run with fewer receive
   7454 				 * XXX buffers instead of just failing.
   7455 				 */
   7456 				wm_rxdrain(rxq);
   7457 				return ENOMEM;
   7458 			}
   7459 		} else {
   7460 			/*
   7461 			 * For 82575 and 82576, the RX descriptors must be
   7462 			 * initialized after the setting of RCTL.EN in
   7463 			 * wm_set_filter()
   7464 			 */
   7465 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7466 				wm_init_rxdesc(rxq, i);
   7467 		}
   7468 	}
   7469 	rxq->rxq_ptr = 0;
   7470 	rxq->rxq_discard = 0;
   7471 	WM_RXCHAIN_RESET(rxq);
   7472 
   7473 	return 0;
   7474 }
   7475 
   7476 static int
   7477 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7478     struct wm_rxqueue *rxq)
   7479 {
   7480 
   7481 	KASSERT(mutex_owned(rxq->rxq_lock));
   7482 
   7483 	/*
   7484 	 * Set up some register offsets that are different between
   7485 	 * the i82542 and the i82543 and later chips.
   7486 	 */
   7487 	if (sc->sc_type < WM_T_82543)
   7488 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7489 	else
   7490 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7491 
   7492 	wm_init_rx_regs(sc, wmq, rxq);
   7493 	return wm_init_rx_buffer(sc, rxq);
   7494 }
   7495 
   7496 /*
    7497  * wm_init_txrx_queues:
   7498  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7499  */
   7500 static int
   7501 wm_init_txrx_queues(struct wm_softc *sc)
   7502 {
   7503 	int i, error = 0;
   7504 
   7505 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7506 		device_xname(sc->sc_dev), __func__));
   7507 
   7508 	for (i = 0; i < sc->sc_nqueues; i++) {
   7509 		struct wm_queue *wmq = &sc->sc_queue[i];
   7510 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7511 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7512 
    7513 		/*
    7514 		 * TODO
    7515 		 * Currently, use a constant value instead of AIM.
    7516 		 * Furthermore, the interrupt interval of the multiqueue
    7517 		 * path, which uses polling mode, is less than the default
    7518 		 * value. More tuning and AIM are required.
    7519 		 */
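         		/*
         		 * E.g. on NEWQUEUE devices, wmq_itr = 50 gives roughly
         		 * 1,000,000 / 50 = 20,000 interrupts/sec per queue.
         		 */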
   7520 		if (wm_is_using_multiqueue(sc))
   7521 			wmq->wmq_itr = 50;
   7522 		else
   7523 			wmq->wmq_itr = sc->sc_itr_init;
   7524 		wmq->wmq_set_itr = true;
   7525 
   7526 		mutex_enter(txq->txq_lock);
   7527 		wm_init_tx_queue(sc, wmq, txq);
   7528 		mutex_exit(txq->txq_lock);
   7529 
   7530 		mutex_enter(rxq->rxq_lock);
   7531 		error = wm_init_rx_queue(sc, wmq, rxq);
   7532 		mutex_exit(rxq->rxq_lock);
   7533 		if (error)
   7534 			break;
   7535 	}
   7536 
   7537 	return error;
   7538 }
   7539 
   7540 /*
   7541  * wm_tx_offload:
   7542  *
   7543  *	Set up TCP/IP checksumming parameters for the
   7544  *	specified packet.
   7545  */
   7546 static void
   7547 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7548     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7549 {
   7550 	struct mbuf *m0 = txs->txs_mbuf;
   7551 	struct livengood_tcpip_ctxdesc *t;
   7552 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7553 	uint32_t ipcse;
   7554 	struct ether_header *eh;
   7555 	int offset, iphl;
   7556 	uint8_t fields;
   7557 
   7558 	/*
   7559 	 * XXX It would be nice if the mbuf pkthdr had offset
   7560 	 * fields for the protocol headers.
   7561 	 */
   7562 
   7563 	eh = mtod(m0, struct ether_header *);
   7564 	switch (htons(eh->ether_type)) {
   7565 	case ETHERTYPE_IP:
   7566 	case ETHERTYPE_IPV6:
   7567 		offset = ETHER_HDR_LEN;
   7568 		break;
   7569 
   7570 	case ETHERTYPE_VLAN:
   7571 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7572 		break;
   7573 
   7574 	default:
   7575 		/* Don't support this protocol or encapsulation. */
   7576 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7577 		txq->txq_last_hw_ipcs = 0;
   7578 		txq->txq_last_hw_tucs = 0;
   7579 		*fieldsp = 0;
   7580 		*cmdp = 0;
   7581 		return;
   7582 	}
   7583 
   7584 	if ((m0->m_pkthdr.csum_flags &
   7585 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7586 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7587 	} else
   7588 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7589 
   7590 	ipcse = offset + iphl - 1;
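         	/*
         	 * E.g. for a plain IPv4 frame, offset = ETHER_HDR_LEN (14)
         	 * and iphl = 20, so ipcse = 33: the offset of the last byte
         	 * of the IP header.
         	 */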
   7591 
   7592 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7593 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7594 	seg = 0;
   7595 	fields = 0;
   7596 
   7597 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7598 		int hlen = offset + iphl;
   7599 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7600 
   7601 		if (__predict_false(m0->m_len <
   7602 				    (hlen + sizeof(struct tcphdr)))) {
   7603 			/*
   7604 			 * TCP/IP headers are not in the first mbuf; we need
   7605 			 * to do this the slow and painful way. Let's just
   7606 			 * hope this doesn't happen very often.
   7607 			 */
   7608 			struct tcphdr th;
   7609 
   7610 			WM_Q_EVCNT_INCR(txq, tsopain);
   7611 
   7612 			m_copydata(m0, hlen, sizeof(th), &th);
   7613 			if (v4) {
   7614 				struct ip ip;
   7615 
   7616 				m_copydata(m0, offset, sizeof(ip), &ip);
   7617 				ip.ip_len = 0;
   7618 				m_copyback(m0,
   7619 				    offset + offsetof(struct ip, ip_len),
   7620 				    sizeof(ip.ip_len), &ip.ip_len);
   7621 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7622 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7623 			} else {
   7624 				struct ip6_hdr ip6;
   7625 
   7626 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7627 				ip6.ip6_plen = 0;
   7628 				m_copyback(m0,
   7629 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7630 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7631 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7632 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7633 			}
   7634 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7635 			    sizeof(th.th_sum), &th.th_sum);
   7636 
   7637 			hlen += th.th_off << 2;
   7638 		} else {
   7639 			/*
   7640 			 * TCP/IP headers are in the first mbuf; we can do
   7641 			 * this the easy way.
   7642 			 */
   7643 			struct tcphdr *th;
   7644 
   7645 			if (v4) {
   7646 				struct ip *ip =
   7647 				    (void *)(mtod(m0, char *) + offset);
   7648 				th = (void *)(mtod(m0, char *) + hlen);
   7649 
   7650 				ip->ip_len = 0;
   7651 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7652 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7653 			} else {
   7654 				struct ip6_hdr *ip6 =
   7655 				    (void *)(mtod(m0, char *) + offset);
   7656 				th = (void *)(mtod(m0, char *) + hlen);
   7657 
   7658 				ip6->ip6_plen = 0;
   7659 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7660 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7661 			}
   7662 			hlen += th->th_off << 2;
   7663 		}
   7664 
   7665 		if (v4) {
   7666 			WM_Q_EVCNT_INCR(txq, tso);
   7667 			cmdlen |= WTX_TCPIP_CMD_IP;
   7668 		} else {
   7669 			WM_Q_EVCNT_INCR(txq, tso6);
   7670 			ipcse = 0;
   7671 		}
   7672 		cmd |= WTX_TCPIP_CMD_TSE;
   7673 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7674 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7675 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7676 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7677 	}
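         	/*
         	 * E.g. for TSOv4 over a standard 14 + 20 + 20 byte header
         	 * stack, hlen = 54; cmdlen carries the payload length
         	 * m0->m_pkthdr.len - 54 and seg carries the MSS.
         	 */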
   7678 
   7679 	/*
   7680 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7681 	 * offload feature, if we load the context descriptor, we
   7682 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7683 	 */
   7684 
   7685 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7686 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7687 	    WTX_TCPIP_IPCSE(ipcse);
   7688 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7689 		WM_Q_EVCNT_INCR(txq, ipsum);
   7690 		fields |= WTX_IXSM;
   7691 	}
   7692 
   7693 	offset += iphl;
   7694 
   7695 	if (m0->m_pkthdr.csum_flags &
   7696 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7697 		WM_Q_EVCNT_INCR(txq, tusum);
   7698 		fields |= WTX_TXSM;
   7699 		tucs = WTX_TCPIP_TUCSS(offset) |
   7700 		    WTX_TCPIP_TUCSO(offset +
   7701 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7702 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7703 	} else if ((m0->m_pkthdr.csum_flags &
   7704 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7705 		WM_Q_EVCNT_INCR(txq, tusum6);
   7706 		fields |= WTX_TXSM;
   7707 		tucs = WTX_TCPIP_TUCSS(offset) |
   7708 		    WTX_TCPIP_TUCSO(offset +
   7709 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7710 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7711 	} else {
   7712 		/* Just initialize it to a valid TCP context. */
   7713 		tucs = WTX_TCPIP_TUCSS(offset) |
   7714 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7715 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7716 	}
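         	/*
         	 * E.g. for plain TCPv4, TUCSS is the start of the TCP header
         	 * and TUCSO points at th_sum, 16 bytes into it.
         	 */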
   7717 
   7718 	*cmdp = cmd;
   7719 	*fieldsp = fields;
   7720 
   7721 	/*
   7722 	 * We don't have to write context descriptor for every packet
   7723 	 * except for 82574. For 82574, we must write context descriptor
   7724 	 * for every packet when we use two descriptor queues.
   7725 	 *
   7726 	 * The 82574L can only remember the *last* context used
    7727 	 * regardless of the queue it was used for.  We cannot reuse
   7728 	 * contexts on this hardware platform and must generate a new
   7729 	 * context every time.  82574L hardware spec, section 7.2.6,
   7730 	 * second note.
   7731 	 */
   7732 	if (sc->sc_nqueues < 2) {
   7733 		/*
    7734 		 * Setting up a new checksum offload context for every
    7735 		 * frame takes a lot of processing time for hardware.
    7736 		 * This also reduces performance a lot for small sized
    7737 		 * frames, so avoid it if the driver can use a previously
    7738 		 * configured checksum offload context.
    7739 		 * For TSO, in theory we can reuse the same TSO context
    7740 		 * only if the frame is the same type (IP/TCP) and has the
    7741 		 * same MSS. However, checking whether a frame has the same
    7742 		 * IP/TCP structure is hard, so just ignore that and always
    7743 		 * re-establish a new TSO context.
   7744 		 */
   7745 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7746 		    == 0) {
   7747 			if (txq->txq_last_hw_cmd == cmd &&
   7748 			    txq->txq_last_hw_fields == fields &&
   7749 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7750 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7751 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7752 				return;
   7753 			}
   7754 		}
   7755 
   7756 		txq->txq_last_hw_cmd = cmd;
   7757 		txq->txq_last_hw_fields = fields;
   7758 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7759 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7760 	}
   7761 
   7762 	/* Fill in the context descriptor. */
   7763 	t = (struct livengood_tcpip_ctxdesc *)
   7764 	    &txq->txq_descs[txq->txq_next];
   7765 	t->tcpip_ipcs = htole32(ipcs);
   7766 	t->tcpip_tucs = htole32(tucs);
   7767 	t->tcpip_cmdlen = htole32(cmdlen);
   7768 	t->tcpip_seg = htole32(seg);
   7769 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7770 
   7771 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7772 	txs->txs_ndesc++;
   7773 }
   7774 
   7775 static inline int
   7776 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7777 {
   7778 	struct wm_softc *sc = ifp->if_softc;
   7779 	u_int cpuid = cpu_index(curcpu());
   7780 
   7781 	/*
    7782 	 * Currently, a simple distribution strategy.
    7783 	 * TODO:
    7784 	 * Distribute by flowid (RSS hash value).
   7785 	 */
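         	/*
         	 * E.g. with ncpu = 8, sc_nqueues = 4 and sc_affinity_offset
         	 * = 0: CPUs 0 and 4 map to queue 0, CPUs 1 and 5 to queue 1,
         	 * and so on.
         	 */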
   7786 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7787 }
   7788 
   7789 static inline bool
   7790 wm_linkdown_discard(struct wm_txqueue *txq)
   7791 {
   7792 
   7793 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7794 		return true;
   7795 
   7796 	return false;
   7797 }
   7798 
   7799 /*
   7800  * wm_start:		[ifnet interface function]
   7801  *
   7802  *	Start packet transmission on the interface.
   7803  */
   7804 static void
   7805 wm_start(struct ifnet *ifp)
   7806 {
   7807 	struct wm_softc *sc = ifp->if_softc;
   7808 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7809 
   7810 #ifdef WM_MPSAFE
   7811 	KASSERT(if_is_mpsafe(ifp));
   7812 #endif
   7813 	/*
   7814 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7815 	 */
   7816 
   7817 	mutex_enter(txq->txq_lock);
   7818 	if (!txq->txq_stopping)
   7819 		wm_start_locked(ifp);
   7820 	mutex_exit(txq->txq_lock);
   7821 }
   7822 
   7823 static void
   7824 wm_start_locked(struct ifnet *ifp)
   7825 {
   7826 	struct wm_softc *sc = ifp->if_softc;
   7827 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7828 
   7829 	wm_send_common_locked(ifp, txq, false);
   7830 }
   7831 
   7832 static int
   7833 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7834 {
   7835 	int qid;
   7836 	struct wm_softc *sc = ifp->if_softc;
   7837 	struct wm_txqueue *txq;
   7838 
   7839 	qid = wm_select_txqueue(ifp, m);
   7840 	txq = &sc->sc_queue[qid].wmq_txq;
   7841 
   7842 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7843 		m_freem(m);
   7844 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7845 		return ENOBUFS;
   7846 	}
   7847 
   7848 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7849 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7850 	if (m->m_flags & M_MCAST)
   7851 		if_statinc_ref(nsr, if_omcasts);
   7852 	IF_STAT_PUTREF(ifp);
   7853 
   7854 	if (mutex_tryenter(txq->txq_lock)) {
   7855 		if (!txq->txq_stopping)
   7856 			wm_transmit_locked(ifp, txq);
   7857 		mutex_exit(txq->txq_lock);
   7858 	}
   7859 
   7860 	return 0;
   7861 }
   7862 
   7863 static void
   7864 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7865 {
   7866 
   7867 	wm_send_common_locked(ifp, txq, true);
   7868 }
   7869 
   7870 static void
   7871 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7872     bool is_transmit)
   7873 {
   7874 	struct wm_softc *sc = ifp->if_softc;
   7875 	struct mbuf *m0;
   7876 	struct wm_txsoft *txs;
   7877 	bus_dmamap_t dmamap;
   7878 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7879 	bus_addr_t curaddr;
   7880 	bus_size_t seglen, curlen;
   7881 	uint32_t cksumcmd;
   7882 	uint8_t cksumfields;
   7883 	bool remap = true;
   7884 
   7885 	KASSERT(mutex_owned(txq->txq_lock));
   7886 
   7887 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7888 		return;
   7889 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7890 		return;
   7891 
   7892 	if (__predict_false(wm_linkdown_discard(txq))) {
   7893 		do {
   7894 			if (is_transmit)
   7895 				m0 = pcq_get(txq->txq_interq);
   7896 			else
   7897 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   7898 			/*
    7899 			 * Increment the successful-packet counter, as in
    7900 			 * the case of a packet discarded by a link-down PHY.
   7901 			 */
   7902 			if (m0 != NULL)
   7903 				if_statinc(ifp, if_opackets);
   7904 			m_freem(m0);
   7905 		} while (m0 != NULL);
   7906 		return;
   7907 	}
   7908 
   7909 	/* Remember the previous number of free descriptors. */
   7910 	ofree = txq->txq_free;
   7911 
   7912 	/*
   7913 	 * Loop through the send queue, setting up transmit descriptors
   7914 	 * until we drain the queue, or use up all available transmit
   7915 	 * descriptors.
   7916 	 */
   7917 	for (;;) {
   7918 		m0 = NULL;
   7919 
   7920 		/* Get a work queue entry. */
   7921 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7922 			wm_txeof(txq, UINT_MAX);
   7923 			if (txq->txq_sfree == 0) {
   7924 				DPRINTF(sc, WM_DEBUG_TX,
   7925 				    ("%s: TX: no free job descriptors\n",
   7926 					device_xname(sc->sc_dev)));
   7927 				WM_Q_EVCNT_INCR(txq, txsstall);
   7928 				break;
   7929 			}
   7930 		}
   7931 
   7932 		/* Grab a packet off the queue. */
   7933 		if (is_transmit)
   7934 			m0 = pcq_get(txq->txq_interq);
   7935 		else
   7936 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7937 		if (m0 == NULL)
   7938 			break;
   7939 
   7940 		DPRINTF(sc, WM_DEBUG_TX,
   7941 		    ("%s: TX: have packet to transmit: %p\n",
   7942 			device_xname(sc->sc_dev), m0));
   7943 
   7944 		txs = &txq->txq_soft[txq->txq_snext];
   7945 		dmamap = txs->txs_dmamap;
   7946 
   7947 		use_tso = (m0->m_pkthdr.csum_flags &
   7948 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7949 
   7950 		/*
   7951 		 * So says the Linux driver:
   7952 		 * The controller does a simple calculation to make sure
   7953 		 * there is enough room in the FIFO before initiating the
   7954 		 * DMA for each buffer. The calc is:
   7955 		 *	4 = ceil(buffer len / MSS)
   7956 		 * To make sure we don't overrun the FIFO, adjust the max
   7957 		 * buffer len if the MSS drops.
   7958 		 */
   7959 		dmamap->dm_maxsegsz =
   7960 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7961 		    ? m0->m_pkthdr.segsz << 2
   7962 		    : WTX_MAX_LEN;
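         		/*
         		 * A worked example (hypothetical MSS, not from the
         		 * spec): with segsz = 1460, segsz << 2 = 5840, which
         		 * is less than WTX_MAX_LEN, so dm_maxsegsz becomes
         		 * 5840 and ceil(5840 / 1460) = 4 matches the FIFO
         		 * headroom the hardware assumes per buffer.
         		 */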
   7963 
   7964 		/*
   7965 		 * Load the DMA map.  If this fails, the packet either
   7966 		 * didn't fit in the allotted number of segments, or we
   7967 		 * were short on resources.  For the too-many-segments
   7968 		 * case, we simply report an error and drop the packet,
   7969 		 * since we can't sanely copy a jumbo packet to a single
   7970 		 * buffer.
   7971 		 */
   7972 retry:
   7973 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7974 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7975 		if (__predict_false(error)) {
   7976 			if (error == EFBIG) {
   7977 				if (remap == true) {
   7978 					struct mbuf *m;
   7979 
   7980 					remap = false;
   7981 					m = m_defrag(m0, M_NOWAIT);
   7982 					if (m != NULL) {
   7983 						WM_Q_EVCNT_INCR(txq, defrag);
   7984 						m0 = m;
   7985 						goto retry;
   7986 					}
   7987 				}
   7988 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7989 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7990 				    "DMA segments, dropping...\n",
   7991 				    device_xname(sc->sc_dev));
   7992 				wm_dump_mbuf_chain(sc, m0);
   7993 				m_freem(m0);
   7994 				continue;
   7995 			}
   7996 			/* Short on resources, just stop for now. */
   7997 			DPRINTF(sc, WM_DEBUG_TX,
   7998 			    ("%s: TX: dmamap load failed: %d\n",
   7999 				device_xname(sc->sc_dev), error));
   8000 			break;
   8001 		}
   8002 
   8003 		segs_needed = dmamap->dm_nsegs;
   8004 		if (use_tso) {
   8005 			/* For sentinel descriptor; see below. */
   8006 			segs_needed++;
   8007 		}
   8008 
   8009 		/*
   8010 		 * Ensure we have enough descriptors free to describe
   8011 		 * the packet. Note, we always reserve one descriptor
   8012 		 * at the end of the ring due to the semantics of the
   8013 		 * TDT register, plus one more in the event we need
   8014 		 * to load offload context.
   8015 		 */
   8016 		if (segs_needed > txq->txq_free - 2) {
   8017 			/*
   8018 			 * Not enough free descriptors to transmit this
   8019 			 * packet.  We haven't committed anything yet,
   8020 			 * so just unload the DMA map, put the packet
    8021 			 * back on the queue, and punt. Notify the upper
   8022 			 * layer that there are no more slots left.
   8023 			 */
   8024 			DPRINTF(sc, WM_DEBUG_TX,
   8025 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8026 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8027 				segs_needed, txq->txq_free - 1));
   8028 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8029 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8030 			WM_Q_EVCNT_INCR(txq, txdstall);
   8031 			break;
   8032 		}
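         		/*
         		 * Sketch of the reservation above (hypothetical
         		 * numbers): with txq_free = 64, a packet needing 63
         		 * descriptors is deferred, since 63 > 64 - 2; one
         		 * slot is reserved for the TDT semantics and one for
         		 * a possible context descriptor.
         		 */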
   8033 
   8034 		/*
   8035 		 * Check for 82547 Tx FIFO bug. We need to do this
   8036 		 * once we know we can transmit the packet, since we
   8037 		 * do some internal FIFO space accounting here.
   8038 		 */
   8039 		if (sc->sc_type == WM_T_82547 &&
   8040 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8041 			DPRINTF(sc, WM_DEBUG_TX,
   8042 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8043 				device_xname(sc->sc_dev)));
   8044 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8045 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8046 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8047 			break;
   8048 		}
   8049 
   8050 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8051 
   8052 		DPRINTF(sc, WM_DEBUG_TX,
   8053 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8054 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8055 
   8056 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8057 
   8058 		/*
   8059 		 * Store a pointer to the packet so that we can free it
   8060 		 * later.
   8061 		 *
   8062 		 * Initially, we consider the number of descriptors the
   8063 		 * packet uses the number of DMA segments.  This may be
   8064 		 * incremented by 1 if we do checksum offload (a descriptor
   8065 		 * is used to set the checksum context).
   8066 		 */
   8067 		txs->txs_mbuf = m0;
   8068 		txs->txs_firstdesc = txq->txq_next;
   8069 		txs->txs_ndesc = segs_needed;
   8070 
   8071 		/* Set up offload parameters for this packet. */
   8072 		if (m0->m_pkthdr.csum_flags &
   8073 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8074 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8075 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8076 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8077 		} else {
   8078 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8079 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8080 			cksumcmd = 0;
   8081 			cksumfields = 0;
   8082 		}
   8083 
   8084 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8085 
   8086 		/* Sync the DMA map. */
   8087 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8088 		    BUS_DMASYNC_PREWRITE);
   8089 
   8090 		/* Initialize the transmit descriptor. */
   8091 		for (nexttx = txq->txq_next, seg = 0;
   8092 		     seg < dmamap->dm_nsegs; seg++) {
   8093 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8094 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8095 			     seglen != 0;
   8096 			     curaddr += curlen, seglen -= curlen,
   8097 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8098 				curlen = seglen;
   8099 
   8100 				/*
   8101 				 * So says the Linux driver:
   8102 				 * Work around for premature descriptor
   8103 				 * write-backs in TSO mode.  Append a
   8104 				 * 4-byte sentinel descriptor.
   8105 				 */
   8106 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8107 				    curlen > 8)
   8108 					curlen -= 4;
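         				/*
         				 * Sketch of the effect: if the last
         				 * segment is 64 bytes, curlen becomes
         				 * 60 here and the remaining 4 bytes
         				 * are emitted as the sentinel
         				 * descriptor on the next iteration.
         				 */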
   8109 
   8110 				wm_set_dma_addr(
   8111 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8112 				txq->txq_descs[nexttx].wtx_cmdlen
   8113 				    = htole32(cksumcmd | curlen);
   8114 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8115 				    = 0;
   8116 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8117 				    = cksumfields;
    8118 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8119 				lasttx = nexttx;
   8120 
   8121 				DPRINTF(sc, WM_DEBUG_TX,
   8122 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8123 					"len %#04zx\n",
   8124 					device_xname(sc->sc_dev), nexttx,
   8125 					(uint64_t)curaddr, curlen));
   8126 			}
   8127 		}
   8128 
   8129 		KASSERT(lasttx != -1);
   8130 
   8131 		/*
   8132 		 * Set up the command byte on the last descriptor of
   8133 		 * the packet. If we're in the interrupt delay window,
   8134 		 * delay the interrupt.
   8135 		 */
   8136 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8137 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8138 
   8139 		/*
   8140 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8141 		 * up the descriptor to encapsulate the packet for us.
   8142 		 *
   8143 		 * This is only valid on the last descriptor of the packet.
   8144 		 */
   8145 		if (vlan_has_tag(m0)) {
   8146 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8147 			    htole32(WTX_CMD_VLE);
   8148 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8149 			    = htole16(vlan_get_tag(m0));
   8150 		}
   8151 
   8152 		txs->txs_lastdesc = lasttx;
   8153 
   8154 		DPRINTF(sc, WM_DEBUG_TX,
   8155 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8156 			device_xname(sc->sc_dev),
   8157 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8158 
   8159 		/* Sync the descriptors we're using. */
   8160 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8161 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8162 
   8163 		/* Give the packet to the chip. */
   8164 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8165 
   8166 		DPRINTF(sc, WM_DEBUG_TX,
   8167 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8168 
   8169 		DPRINTF(sc, WM_DEBUG_TX,
   8170 		    ("%s: TX: finished transmitting packet, job %d\n",
   8171 			device_xname(sc->sc_dev), txq->txq_snext));
   8172 
   8173 		/* Advance the tx pointer. */
   8174 		txq->txq_free -= txs->txs_ndesc;
   8175 		txq->txq_next = nexttx;
   8176 
   8177 		txq->txq_sfree--;
   8178 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8179 
   8180 		/* Pass the packet to any BPF listeners. */
   8181 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8182 	}
   8183 
   8184 	if (m0 != NULL) {
   8185 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8186 		WM_Q_EVCNT_INCR(txq, descdrop);
   8187 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8188 			__func__));
   8189 		m_freem(m0);
   8190 	}
   8191 
   8192 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8193 		/* No more slots; notify upper layer. */
   8194 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8195 	}
   8196 
   8197 	if (txq->txq_free != ofree) {
   8198 		/* Set a watchdog timer in case the chip flakes out. */
   8199 		txq->txq_lastsent = time_uptime;
   8200 		txq->txq_sending = true;
   8201 	}
   8202 }
   8203 
   8204 /*
   8205  * wm_nq_tx_offload:
   8206  *
   8207  *	Set up TCP/IP checksumming parameters for the
   8208  *	specified packet, for NEWQUEUE devices
   8209  */
   8210 static void
   8211 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8212     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8213 {
   8214 	struct mbuf *m0 = txs->txs_mbuf;
   8215 	uint32_t vl_len, mssidx, cmdc;
   8216 	struct ether_header *eh;
   8217 	int offset, iphl;
   8218 
   8219 	/*
   8220 	 * XXX It would be nice if the mbuf pkthdr had offset
   8221 	 * fields for the protocol headers.
   8222 	 */
   8223 	*cmdlenp = 0;
   8224 	*fieldsp = 0;
   8225 
   8226 	eh = mtod(m0, struct ether_header *);
   8227 	switch (htons(eh->ether_type)) {
   8228 	case ETHERTYPE_IP:
   8229 	case ETHERTYPE_IPV6:
   8230 		offset = ETHER_HDR_LEN;
   8231 		break;
   8232 
   8233 	case ETHERTYPE_VLAN:
   8234 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8235 		break;
   8236 
   8237 	default:
   8238 		/* Don't support this protocol or encapsulation. */
   8239 		*do_csum = false;
   8240 		return;
   8241 	}
   8242 	*do_csum = true;
   8243 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8244 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8245 
   8246 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8247 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8248 
   8249 	if ((m0->m_pkthdr.csum_flags &
   8250 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8251 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8252 	} else {
   8253 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8254 	}
   8255 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8256 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
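         	/*
         	 * Illustrative packing (hypothetical frame): an untagged
         	 * IPv4 packet with standard headers yields MACLEN = 14 and
         	 * IPLEN = 20 in vl_len; a VLAN tag would additionally set
         	 * the 12-bit VLAN ID below.
         	 */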
   8257 
   8258 	if (vlan_has_tag(m0)) {
   8259 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8260 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8261 		*cmdlenp |= NQTX_CMD_VLE;
   8262 	}
   8263 
   8264 	mssidx = 0;
   8265 
   8266 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8267 		int hlen = offset + iphl;
   8268 		int tcp_hlen;
   8269 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8270 
   8271 		if (__predict_false(m0->m_len <
   8272 				    (hlen + sizeof(struct tcphdr)))) {
   8273 			/*
   8274 			 * TCP/IP headers are not in the first mbuf; we need
   8275 			 * to do this the slow and painful way. Let's just
   8276 			 * hope this doesn't happen very often.
   8277 			 */
   8278 			struct tcphdr th;
   8279 
   8280 			WM_Q_EVCNT_INCR(txq, tsopain);
   8281 
   8282 			m_copydata(m0, hlen, sizeof(th), &th);
   8283 			if (v4) {
   8284 				struct ip ip;
   8285 
   8286 				m_copydata(m0, offset, sizeof(ip), &ip);
   8287 				ip.ip_len = 0;
   8288 				m_copyback(m0,
   8289 				    offset + offsetof(struct ip, ip_len),
   8290 				    sizeof(ip.ip_len), &ip.ip_len);
   8291 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8292 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8293 			} else {
   8294 				struct ip6_hdr ip6;
   8295 
   8296 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8297 				ip6.ip6_plen = 0;
   8298 				m_copyback(m0,
   8299 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8300 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8301 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8302 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8303 			}
   8304 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8305 			    sizeof(th.th_sum), &th.th_sum);
   8306 
   8307 			tcp_hlen = th.th_off << 2;
   8308 		} else {
   8309 			/*
   8310 			 * TCP/IP headers are in the first mbuf; we can do
   8311 			 * this the easy way.
   8312 			 */
   8313 			struct tcphdr *th;
   8314 
   8315 			if (v4) {
   8316 				struct ip *ip =
   8317 				    (void *)(mtod(m0, char *) + offset);
   8318 				th = (void *)(mtod(m0, char *) + hlen);
   8319 
   8320 				ip->ip_len = 0;
   8321 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8322 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8323 			} else {
   8324 				struct ip6_hdr *ip6 =
   8325 				    (void *)(mtod(m0, char *) + offset);
   8326 				th = (void *)(mtod(m0, char *) + hlen);
   8327 
   8328 				ip6->ip6_plen = 0;
   8329 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8330 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8331 			}
   8332 			tcp_hlen = th->th_off << 2;
   8333 		}
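         		/*
         		 * In both paths above, the IP length field is zeroed
         		 * and th_sum is seeded with the pseudo-header checksum
         		 * (without the length), which the hardware completes
         		 * per segment during TSO.  As a worked example, th_off
         		 * counts 32-bit words, so th_off = 5 gives
         		 * tcp_hlen = 5 << 2 = 20 bytes.
         		 */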
   8334 		hlen += tcp_hlen;
   8335 		*cmdlenp |= NQTX_CMD_TSE;
   8336 
   8337 		if (v4) {
   8338 			WM_Q_EVCNT_INCR(txq, tso);
   8339 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8340 		} else {
   8341 			WM_Q_EVCNT_INCR(txq, tso6);
   8342 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8343 		}
   8344 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8345 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8346 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8347 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8348 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8349 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8350 	} else {
   8351 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8352 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8353 	}
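         	/*
         	 * PAYLEN sketch (hypothetical sizes): for a 7306-byte TSO
         	 * packet with 54 bytes of headers (14 + 20 + 20), PAYLEN is
         	 * 7252, and mssidx carries MSS = segsz and L4LEN = tcp_hlen;
         	 * in the non-TSO case PAYLEN is simply the full packet
         	 * length.
         	 */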
   8354 
   8355 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8356 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8357 		cmdc |= NQTXC_CMD_IP4;
   8358 	}
   8359 
   8360 	if (m0->m_pkthdr.csum_flags &
   8361 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8362 		WM_Q_EVCNT_INCR(txq, tusum);
   8363 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8364 			cmdc |= NQTXC_CMD_TCP;
   8365 		else
   8366 			cmdc |= NQTXC_CMD_UDP;
   8367 
   8368 		cmdc |= NQTXC_CMD_IP4;
   8369 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8370 	}
   8371 	if (m0->m_pkthdr.csum_flags &
   8372 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8373 		WM_Q_EVCNT_INCR(txq, tusum6);
   8374 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8375 			cmdc |= NQTXC_CMD_TCP;
   8376 		else
   8377 			cmdc |= NQTXC_CMD_UDP;
   8378 
   8379 		cmdc |= NQTXC_CMD_IP6;
   8380 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8381 	}
   8382 
   8383 	/*
    8384 	 * We don't have to write a context descriptor for every packet on
    8385 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    8386 	 * I210 and I211. It is enough to write one per Tx queue for
    8387 	 * these controllers.
    8388 	 * Writing a context descriptor for every packet adds overhead,
    8389 	 * but it does not cause problems.
   8390 	 */
   8391 	/* Fill in the context descriptor. */
   8392 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8393 	    htole32(vl_len);
   8394 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8395 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8396 	    htole32(cmdc);
   8397 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8398 	    htole32(mssidx);
   8399 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8400 	DPRINTF(sc, WM_DEBUG_TX,
   8401 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8402 		txq->txq_next, 0, vl_len));
   8403 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8404 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8405 	txs->txs_ndesc++;
   8406 }
   8407 
   8408 /*
   8409  * wm_nq_start:		[ifnet interface function]
   8410  *
   8411  *	Start packet transmission on the interface for NEWQUEUE devices
   8412  */
   8413 static void
   8414 wm_nq_start(struct ifnet *ifp)
   8415 {
   8416 	struct wm_softc *sc = ifp->if_softc;
   8417 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8418 
   8419 #ifdef WM_MPSAFE
   8420 	KASSERT(if_is_mpsafe(ifp));
   8421 #endif
   8422 	/*
   8423 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8424 	 */
   8425 
   8426 	mutex_enter(txq->txq_lock);
   8427 	if (!txq->txq_stopping)
   8428 		wm_nq_start_locked(ifp);
   8429 	mutex_exit(txq->txq_lock);
   8430 }
   8431 
   8432 static void
   8433 wm_nq_start_locked(struct ifnet *ifp)
   8434 {
   8435 	struct wm_softc *sc = ifp->if_softc;
   8436 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8437 
   8438 	wm_nq_send_common_locked(ifp, txq, false);
   8439 }
   8440 
   8441 static int
   8442 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8443 {
   8444 	int qid;
   8445 	struct wm_softc *sc = ifp->if_softc;
   8446 	struct wm_txqueue *txq;
   8447 
   8448 	qid = wm_select_txqueue(ifp, m);
   8449 	txq = &sc->sc_queue[qid].wmq_txq;
   8450 
   8451 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8452 		m_freem(m);
   8453 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8454 		return ENOBUFS;
   8455 	}
   8456 
   8457 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8458 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8459 	if (m->m_flags & M_MCAST)
   8460 		if_statinc_ref(nsr, if_omcasts);
   8461 	IF_STAT_PUTREF(ifp);
   8462 
   8463 	/*
    8464 	 * There are two situations in which this mutex_tryenter() can
    8465 	 * fail at run time:
   8466 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
   8467 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
   8468 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   8469 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   8470 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   8471 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   8472 	 * stuck, either.
   8473 	 */
   8474 	if (mutex_tryenter(txq->txq_lock)) {
   8475 		if (!txq->txq_stopping)
   8476 			wm_nq_transmit_locked(ifp, txq);
   8477 		mutex_exit(txq->txq_lock);
   8478 	}
   8479 
   8480 	return 0;
   8481 }
   8482 
   8483 static void
   8484 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8485 {
   8486 
   8487 	wm_nq_send_common_locked(ifp, txq, true);
   8488 }
   8489 
   8490 static void
   8491 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8492     bool is_transmit)
   8493 {
   8494 	struct wm_softc *sc = ifp->if_softc;
   8495 	struct mbuf *m0;
   8496 	struct wm_txsoft *txs;
   8497 	bus_dmamap_t dmamap;
   8498 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8499 	bool do_csum, sent;
   8500 	bool remap = true;
   8501 
   8502 	KASSERT(mutex_owned(txq->txq_lock));
   8503 
   8504 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8505 		return;
   8506 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8507 		return;
   8508 
   8509 	if (__predict_false(wm_linkdown_discard(txq))) {
   8510 		do {
   8511 			if (is_transmit)
   8512 				m0 = pcq_get(txq->txq_interq);
   8513 			else
   8514 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8515 			/*
    8516 			 * Increment the successful-packet counter, as in
    8517 			 * the case of a packet discarded by a link-down PHY.
   8518 			 */
   8519 			if (m0 != NULL)
   8520 				if_statinc(ifp, if_opackets);
   8521 			m_freem(m0);
   8522 		} while (m0 != NULL);
   8523 		return;
   8524 	}
   8525 
   8526 	sent = false;
   8527 
   8528 	/*
   8529 	 * Loop through the send queue, setting up transmit descriptors
   8530 	 * until we drain the queue, or use up all available transmit
   8531 	 * descriptors.
   8532 	 */
   8533 	for (;;) {
   8534 		m0 = NULL;
   8535 
   8536 		/* Get a work queue entry. */
   8537 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8538 			wm_txeof(txq, UINT_MAX);
   8539 			if (txq->txq_sfree == 0) {
   8540 				DPRINTF(sc, WM_DEBUG_TX,
   8541 				    ("%s: TX: no free job descriptors\n",
   8542 					device_xname(sc->sc_dev)));
   8543 				WM_Q_EVCNT_INCR(txq, txsstall);
   8544 				break;
   8545 			}
   8546 		}
   8547 
   8548 		/* Grab a packet off the queue. */
   8549 		if (is_transmit)
   8550 			m0 = pcq_get(txq->txq_interq);
   8551 		else
   8552 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8553 		if (m0 == NULL)
   8554 			break;
   8555 
   8556 		DPRINTF(sc, WM_DEBUG_TX,
   8557 		    ("%s: TX: have packet to transmit: %p\n",
   8558 		    device_xname(sc->sc_dev), m0));
   8559 
   8560 		txs = &txq->txq_soft[txq->txq_snext];
   8561 		dmamap = txs->txs_dmamap;
   8562 
   8563 		/*
   8564 		 * Load the DMA map.  If this fails, the packet either
   8565 		 * didn't fit in the allotted number of segments, or we
   8566 		 * were short on resources.  For the too-many-segments
   8567 		 * case, we simply report an error and drop the packet,
   8568 		 * since we can't sanely copy a jumbo packet to a single
   8569 		 * buffer.
   8570 		 */
   8571 retry:
   8572 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8573 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8574 		if (__predict_false(error)) {
   8575 			if (error == EFBIG) {
   8576 				if (remap == true) {
   8577 					struct mbuf *m;
   8578 
   8579 					remap = false;
   8580 					m = m_defrag(m0, M_NOWAIT);
   8581 					if (m != NULL) {
   8582 						WM_Q_EVCNT_INCR(txq, defrag);
   8583 						m0 = m;
   8584 						goto retry;
   8585 					}
   8586 				}
   8587 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8588 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8589 				    "DMA segments, dropping...\n",
   8590 				    device_xname(sc->sc_dev));
   8591 				wm_dump_mbuf_chain(sc, m0);
   8592 				m_freem(m0);
   8593 				continue;
   8594 			}
   8595 			/* Short on resources, just stop for now. */
   8596 			DPRINTF(sc, WM_DEBUG_TX,
   8597 			    ("%s: TX: dmamap load failed: %d\n",
   8598 				device_xname(sc->sc_dev), error));
   8599 			break;
   8600 		}
   8601 
   8602 		segs_needed = dmamap->dm_nsegs;
   8603 
   8604 		/*
   8605 		 * Ensure we have enough descriptors free to describe
   8606 		 * the packet. Note, we always reserve one descriptor
   8607 		 * at the end of the ring due to the semantics of the
   8608 		 * TDT register, plus one more in the event we need
   8609 		 * to load offload context.
   8610 		 */
   8611 		if (segs_needed > txq->txq_free - 2) {
   8612 			/*
   8613 			 * Not enough free descriptors to transmit this
   8614 			 * packet.  We haven't committed anything yet,
   8615 			 * so just unload the DMA map, put the packet
    8616 			 * back on the queue, and punt. Notify the upper
   8617 			 * layer that there are no more slots left.
   8618 			 */
   8619 			DPRINTF(sc, WM_DEBUG_TX,
   8620 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8621 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8622 				segs_needed, txq->txq_free - 1));
   8623 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8624 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8625 			WM_Q_EVCNT_INCR(txq, txdstall);
   8626 			break;
   8627 		}
   8628 
   8629 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8630 
   8631 		DPRINTF(sc, WM_DEBUG_TX,
   8632 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8633 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8634 
   8635 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8636 
   8637 		/*
   8638 		 * Store a pointer to the packet so that we can free it
   8639 		 * later.
   8640 		 *
   8641 		 * Initially, we consider the number of descriptors the
   8642 		 * packet uses the number of DMA segments.  This may be
   8643 		 * incremented by 1 if we do checksum offload (a descriptor
   8644 		 * is used to set the checksum context).
   8645 		 */
   8646 		txs->txs_mbuf = m0;
   8647 		txs->txs_firstdesc = txq->txq_next;
   8648 		txs->txs_ndesc = segs_needed;
   8649 
   8650 		/* Set up offload parameters for this packet. */
   8651 		uint32_t cmdlen, fields, dcmdlen;
   8652 		if (m0->m_pkthdr.csum_flags &
   8653 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8654 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8655 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8656 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8657 			    &do_csum);
   8658 		} else {
   8659 			do_csum = false;
   8660 			cmdlen = 0;
   8661 			fields = 0;
   8662 		}
   8663 
   8664 		/* Sync the DMA map. */
   8665 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8666 		    BUS_DMASYNC_PREWRITE);
   8667 
   8668 		/* Initialize the first transmit descriptor. */
   8669 		nexttx = txq->txq_next;
   8670 		if (!do_csum) {
   8671 			/* Setup a legacy descriptor */
   8672 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8673 			    dmamap->dm_segs[0].ds_addr);
   8674 			txq->txq_descs[nexttx].wtx_cmdlen =
   8675 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8676 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8677 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8678 			if (vlan_has_tag(m0)) {
   8679 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8680 				    htole32(WTX_CMD_VLE);
   8681 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8682 				    htole16(vlan_get_tag(m0));
   8683 			} else
    8684 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8685 
   8686 			dcmdlen = 0;
   8687 		} else {
   8688 			/* Setup an advanced data descriptor */
   8689 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8690 			    htole64(dmamap->dm_segs[0].ds_addr);
   8691 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8692 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8693 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8694 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8695 			    htole32(fields);
   8696 			DPRINTF(sc, WM_DEBUG_TX,
   8697 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8698 				device_xname(sc->sc_dev), nexttx,
   8699 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8700 			DPRINTF(sc, WM_DEBUG_TX,
   8701 			    ("\t 0x%08x%08x\n", fields,
   8702 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8703 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8704 		}
   8705 
   8706 		lasttx = nexttx;
   8707 		nexttx = WM_NEXTTX(txq, nexttx);
   8708 		/*
    8709 		 * Fill in the next descriptors. The legacy and advanced
    8710 		 * formats are the same from here on.
   8711 		 */
   8712 		for (seg = 1; seg < dmamap->dm_nsegs;
   8713 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8714 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8715 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8716 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8717 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8718 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8719 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8720 			lasttx = nexttx;
   8721 
   8722 			DPRINTF(sc, WM_DEBUG_TX,
   8723 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8724 				device_xname(sc->sc_dev), nexttx,
   8725 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8726 				dmamap->dm_segs[seg].ds_len));
   8727 		}
   8728 
   8729 		KASSERT(lasttx != -1);
   8730 
   8731 		/*
   8732 		 * Set up the command byte on the last descriptor of
   8733 		 * the packet. If we're in the interrupt delay window,
   8734 		 * delay the interrupt.
   8735 		 */
   8736 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8737 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8738 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8739 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8740 
   8741 		txs->txs_lastdesc = lasttx;
   8742 
   8743 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8744 		    device_xname(sc->sc_dev),
   8745 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8746 
   8747 		/* Sync the descriptors we're using. */
   8748 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8749 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8750 
   8751 		/* Give the packet to the chip. */
   8752 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8753 		sent = true;
   8754 
   8755 		DPRINTF(sc, WM_DEBUG_TX,
   8756 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8757 
   8758 		DPRINTF(sc, WM_DEBUG_TX,
   8759 		    ("%s: TX: finished transmitting packet, job %d\n",
   8760 			device_xname(sc->sc_dev), txq->txq_snext));
   8761 
   8762 		/* Advance the tx pointer. */
   8763 		txq->txq_free -= txs->txs_ndesc;
   8764 		txq->txq_next = nexttx;
   8765 
   8766 		txq->txq_sfree--;
   8767 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8768 
   8769 		/* Pass the packet to any BPF listeners. */
   8770 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8771 	}
   8772 
   8773 	if (m0 != NULL) {
   8774 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8775 		WM_Q_EVCNT_INCR(txq, descdrop);
   8776 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8777 			__func__));
   8778 		m_freem(m0);
   8779 	}
   8780 
   8781 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8782 		/* No more slots; notify upper layer. */
   8783 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8784 	}
   8785 
   8786 	if (sent) {
   8787 		/* Set a watchdog timer in case the chip flakes out. */
   8788 		txq->txq_lastsent = time_uptime;
   8789 		txq->txq_sending = true;
   8790 	}
   8791 }
   8792 
   8793 static void
   8794 wm_deferred_start_locked(struct wm_txqueue *txq)
   8795 {
   8796 	struct wm_softc *sc = txq->txq_sc;
   8797 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8798 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8799 	int qid = wmq->wmq_id;
   8800 
   8801 	KASSERT(mutex_owned(txq->txq_lock));
   8802 
   8803 	if (txq->txq_stopping) {
   8804 		mutex_exit(txq->txq_lock);
   8805 		return;
   8806 	}
   8807 
   8808 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8809 		/* XXX needed for ALTQ or a single-CPU system */
   8810 		if (qid == 0)
   8811 			wm_nq_start_locked(ifp);
   8812 		wm_nq_transmit_locked(ifp, txq);
   8813 	} else {
    8814 		/* XXX needed for ALTQ or a single-CPU system */
   8815 		if (qid == 0)
   8816 			wm_start_locked(ifp);
   8817 		wm_transmit_locked(ifp, txq);
   8818 	}
   8819 }
   8820 
   8821 /* Interrupt */
   8822 
   8823 /*
   8824  * wm_txeof:
   8825  *
   8826  *	Helper; handle transmit interrupts.
   8827  */
   8828 static bool
   8829 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8830 {
   8831 	struct wm_softc *sc = txq->txq_sc;
   8832 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8833 	struct wm_txsoft *txs;
   8834 	int count = 0;
   8835 	int i;
   8836 	uint8_t status;
   8837 	bool more = false;
   8838 
   8839 	KASSERT(mutex_owned(txq->txq_lock));
   8840 
   8841 	if (txq->txq_stopping)
   8842 		return false;
   8843 
   8844 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8845 
   8846 	/*
   8847 	 * Go through the Tx list and free mbufs for those
   8848 	 * frames which have been transmitted.
   8849 	 */
   8850 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8851 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8852 		if (limit-- == 0) {
   8853 			more = true;
   8854 			DPRINTF(sc, WM_DEBUG_TX,
   8855 			    ("%s: TX: loop limited, job %d is not processed\n",
   8856 				device_xname(sc->sc_dev), i));
   8857 			break;
   8858 		}
   8859 
   8860 		txs = &txq->txq_soft[i];
   8861 
   8862 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8863 			device_xname(sc->sc_dev), i));
   8864 
   8865 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8866 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8867 
   8868 		status =
   8869 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8870 		if ((status & WTX_ST_DD) == 0) {
   8871 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8872 			    BUS_DMASYNC_PREREAD);
   8873 			break;
   8874 		}
   8875 
   8876 		count++;
   8877 		DPRINTF(sc, WM_DEBUG_TX,
   8878 		    ("%s: TX: job %d done: descs %d..%d\n",
   8879 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8880 		    txs->txs_lastdesc));
   8881 
   8882 		/*
   8883 		 * XXX We should probably be using the statistics
   8884 		 * XXX registers, but I don't know if they exist
   8885 		 * XXX on chips before the i82544.
   8886 		 */
   8887 
   8888 #ifdef WM_EVENT_COUNTERS
   8889 		if (status & WTX_ST_TU)
   8890 			WM_Q_EVCNT_INCR(txq, underrun);
   8891 #endif /* WM_EVENT_COUNTERS */
   8892 
   8893 		/*
    8894 		 * Documents for the 82574 and newer say that the status
    8895 		 * field has neither an EC (Excessive Collision) bit nor an
    8896 		 * LC (Late Collision) bit (they are reserved). Refer to the
    8897 		 * "PCIe GbE Controller Open Source Software Developer's
    8898 		 * Manual", the 82574 datasheet, and newer ones.
    8899 		 *
    8900 		 * XXX I saw the LC bit set on an I218 even though the media
    8901 		 * was full duplex; it might have another meaning (no docs).
   8902 		 */
   8903 
   8904 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8905 		    && ((sc->sc_type < WM_T_82574)
   8906 			|| (sc->sc_type == WM_T_80003))) {
   8907 			if_statinc(ifp, if_oerrors);
   8908 			if (status & WTX_ST_LC)
   8909 				log(LOG_WARNING, "%s: late collision\n",
   8910 				    device_xname(sc->sc_dev));
   8911 			else if (status & WTX_ST_EC) {
   8912 				if_statadd(ifp, if_collisions,
   8913 				    TX_COLLISION_THRESHOLD + 1);
   8914 				log(LOG_WARNING, "%s: excessive collisions\n",
   8915 				    device_xname(sc->sc_dev));
   8916 			}
   8917 		} else
   8918 			if_statinc(ifp, if_opackets);
   8919 
   8920 		txq->txq_packets++;
   8921 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8922 
   8923 		txq->txq_free += txs->txs_ndesc;
   8924 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8925 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8926 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8927 		m_freem(txs->txs_mbuf);
   8928 		txs->txs_mbuf = NULL;
   8929 	}
   8930 
   8931 	/* Update the dirty transmit buffer pointer. */
   8932 	txq->txq_sdirty = i;
   8933 	DPRINTF(sc, WM_DEBUG_TX,
   8934 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8935 
   8936 	if (count != 0)
   8937 		rnd_add_uint32(&sc->rnd_source, count);
   8938 
   8939 	/*
   8940 	 * If there are no more pending transmissions, cancel the watchdog
   8941 	 * timer.
   8942 	 */
   8943 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8944 		txq->txq_sending = false;
   8945 
   8946 	return more;
   8947 }
   8948 
   8949 static inline uint32_t
   8950 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8951 {
   8952 	struct wm_softc *sc = rxq->rxq_sc;
   8953 
   8954 	if (sc->sc_type == WM_T_82574)
   8955 		return EXTRXC_STATUS(
   8956 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8957 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8958 		return NQRXC_STATUS(
   8959 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8960 	else
   8961 		return rxq->rxq_descs[idx].wrx_status;
   8962 }
   8963 
   8964 static inline uint32_t
   8965 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8966 {
   8967 	struct wm_softc *sc = rxq->rxq_sc;
   8968 
   8969 	if (sc->sc_type == WM_T_82574)
   8970 		return EXTRXC_ERROR(
   8971 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8972 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8973 		return NQRXC_ERROR(
   8974 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8975 	else
   8976 		return rxq->rxq_descs[idx].wrx_errors;
   8977 }
   8978 
   8979 static inline uint16_t
   8980 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8981 {
   8982 	struct wm_softc *sc = rxq->rxq_sc;
   8983 
   8984 	if (sc->sc_type == WM_T_82574)
   8985 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8986 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8987 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8988 	else
   8989 		return rxq->rxq_descs[idx].wrx_special;
   8990 }
   8991 
   8992 static inline int
   8993 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8994 {
   8995 	struct wm_softc *sc = rxq->rxq_sc;
   8996 
   8997 	if (sc->sc_type == WM_T_82574)
   8998 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8999 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9000 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9001 	else
   9002 		return rxq->rxq_descs[idx].wrx_len;
   9003 }
   9004 
   9005 #ifdef WM_DEBUG
   9006 static inline uint32_t
   9007 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9008 {
   9009 	struct wm_softc *sc = rxq->rxq_sc;
   9010 
   9011 	if (sc->sc_type == WM_T_82574)
   9012 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9013 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9014 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9015 	else
   9016 		return 0;
   9017 }
   9018 
   9019 static inline uint8_t
   9020 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9021 {
   9022 	struct wm_softc *sc = rxq->rxq_sc;
   9023 
   9024 	if (sc->sc_type == WM_T_82574)
   9025 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9026 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9027 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9028 	else
   9029 		return 0;
   9030 }
   9031 #endif /* WM_DEBUG */
   9032 
   9033 static inline bool
   9034 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9035     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9036 {
   9037 
   9038 	if (sc->sc_type == WM_T_82574)
   9039 		return (status & ext_bit) != 0;
   9040 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9041 		return (status & nq_bit) != 0;
   9042 	else
   9043 		return (status & legacy_bit) != 0;
   9044 }
   9045 
   9046 static inline bool
   9047 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9048     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9049 {
   9050 
   9051 	if (sc->sc_type == WM_T_82574)
   9052 		return (error & ext_bit) != 0;
   9053 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9054 		return (error & nq_bit) != 0;
   9055 	else
   9056 		return (error & legacy_bit) != 0;
   9057 }
   9058 
   9059 static inline bool
   9060 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9061 {
   9062 
   9063 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9064 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9065 		return true;
   9066 	else
   9067 		return false;
   9068 }
   9069 
   9070 static inline bool
   9071 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9072 {
   9073 	struct wm_softc *sc = rxq->rxq_sc;
   9074 
   9075 	/* XXX missing error bit for newqueue? */
   9076 	if (wm_rxdesc_is_set_error(sc, errors,
   9077 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9078 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9079 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9080 		NQRXC_ERROR_RXE)) {
   9081 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9082 		    EXTRXC_ERROR_SE, 0))
   9083 			log(LOG_WARNING, "%s: symbol error\n",
   9084 			    device_xname(sc->sc_dev));
   9085 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9086 		    EXTRXC_ERROR_SEQ, 0))
   9087 			log(LOG_WARNING, "%s: receive sequence error\n",
   9088 			    device_xname(sc->sc_dev));
   9089 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9090 		    EXTRXC_ERROR_CE, 0))
   9091 			log(LOG_WARNING, "%s: CRC error\n",
   9092 			    device_xname(sc->sc_dev));
   9093 		return true;
   9094 	}
   9095 
   9096 	return false;
   9097 }
   9098 
   9099 static inline bool
   9100 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9101 {
   9102 	struct wm_softc *sc = rxq->rxq_sc;
   9103 
   9104 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9105 		NQRXC_STATUS_DD)) {
   9106 		/* We have processed all of the receive descriptors. */
   9107 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9108 		return false;
   9109 	}
   9110 
   9111 	return true;
   9112 }
   9113 
   9114 static inline bool
   9115 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9116     uint16_t vlantag, struct mbuf *m)
   9117 {
   9118 
   9119 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9120 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9121 		vlan_set_tag(m, le16toh(vlantag));
   9122 	}
   9123 
   9124 	return true;
   9125 }
   9126 
   9127 static inline void
   9128 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9129     uint32_t errors, struct mbuf *m)
   9130 {
   9131 	struct wm_softc *sc = rxq->rxq_sc;
   9132 
   9133 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9134 		if (wm_rxdesc_is_set_status(sc, status,
   9135 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9136 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9137 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9138 			if (wm_rxdesc_is_set_error(sc, errors,
   9139 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9140 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9141 		}
   9142 		if (wm_rxdesc_is_set_status(sc, status,
   9143 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9144 			/*
   9145 			 * Note: we don't know if this was TCP or UDP,
   9146 			 * so we just set both bits, and expect the
   9147 			 * upper layers to deal.
   9148 			 */
   9149 			WM_Q_EVCNT_INCR(rxq, tusum);
   9150 			m->m_pkthdr.csum_flags |=
   9151 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9152 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9153 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9154 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9155 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9156 		}
   9157 	}
   9158 }
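
/*
 * Usage sketch for the helper above (hypothetical frame): a good TCPv4
 * packet arrives with the IPCS and TCPCS status bits set and no IPE/L4E
 * error bits, so the mbuf leaves with M_CSUM_IPv4 plus all four TCP/UDP
 * checksum flags set and none of the _BAD flags, and the stack can skip
 * software checksum verification.
 */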
   9159 
   9160 /*
   9161  * wm_rxeof:
   9162  *
   9163  *	Helper; handle receive interrupts.
   9164  */
   9165 static bool
   9166 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9167 {
   9168 	struct wm_softc *sc = rxq->rxq_sc;
   9169 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9170 	struct wm_rxsoft *rxs;
   9171 	struct mbuf *m;
   9172 	int i, len;
   9173 	int count = 0;
   9174 	uint32_t status, errors;
   9175 	uint16_t vlantag;
   9176 	bool more = false;
   9177 
   9178 	KASSERT(mutex_owned(rxq->rxq_lock));
   9179 
   9180 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9181 		if (limit-- == 0) {
   9182 			more = true;
   9183 			DPRINTF(sc, WM_DEBUG_RX,
   9184 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9185 				device_xname(sc->sc_dev), i));
   9186 			break;
   9187 		}
   9188 
   9189 		rxs = &rxq->rxq_soft[i];
   9190 
   9191 		DPRINTF(sc, WM_DEBUG_RX,
   9192 		    ("%s: RX: checking descriptor %d\n",
   9193 			device_xname(sc->sc_dev), i));
   9194 		wm_cdrxsync(rxq, i,
   9195 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9196 
   9197 		status = wm_rxdesc_get_status(rxq, i);
   9198 		errors = wm_rxdesc_get_errors(rxq, i);
   9199 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9200 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9201 #ifdef WM_DEBUG
   9202 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9203 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9204 #endif
   9205 
   9206 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9207 			break;
   9208 		}
   9209 
   9210 		count++;
   9211 		if (__predict_false(rxq->rxq_discard)) {
   9212 			DPRINTF(sc, WM_DEBUG_RX,
   9213 			    ("%s: RX: discarding contents of descriptor %d\n",
   9214 				device_xname(sc->sc_dev), i));
   9215 			wm_init_rxdesc(rxq, i);
   9216 			if (wm_rxdesc_is_eop(rxq, status)) {
   9217 				/* Reset our state. */
   9218 				DPRINTF(sc, WM_DEBUG_RX,
   9219 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9220 					device_xname(sc->sc_dev)));
   9221 				rxq->rxq_discard = 0;
   9222 			}
   9223 			continue;
   9224 		}
   9225 
   9226 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9227 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9228 
   9229 		m = rxs->rxs_mbuf;
   9230 
   9231 		/*
   9232 		 * Add a new receive buffer to the ring, unless of
   9233 		 * course the length is zero. Treat the latter as a
   9234 		 * failed mapping.
   9235 		 */
   9236 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9237 			/*
   9238 			 * Failed, throw away what we've done so
   9239 			 * far, and discard the rest of the packet.
   9240 			 */
   9241 			if_statinc(ifp, if_ierrors);
   9242 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9243 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9244 			wm_init_rxdesc(rxq, i);
   9245 			if (!wm_rxdesc_is_eop(rxq, status))
   9246 				rxq->rxq_discard = 1;
   9247 			if (rxq->rxq_head != NULL)
   9248 				m_freem(rxq->rxq_head);
   9249 			WM_RXCHAIN_RESET(rxq);
   9250 			DPRINTF(sc, WM_DEBUG_RX,
   9251 			    ("%s: RX: Rx buffer allocation failed, "
   9252 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9253 				rxq->rxq_discard ? " (discard)" : ""));
   9254 			continue;
   9255 		}
   9256 
   9257 		m->m_len = len;
   9258 		rxq->rxq_len += len;
   9259 		DPRINTF(sc, WM_DEBUG_RX,
   9260 		    ("%s: RX: buffer at %p len %d\n",
   9261 			device_xname(sc->sc_dev), m->m_data, len));
   9262 
   9263 		/* If this is not the end of the packet, keep looking. */
   9264 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9265 			WM_RXCHAIN_LINK(rxq, m);
   9266 			DPRINTF(sc, WM_DEBUG_RX,
   9267 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9268 				device_xname(sc->sc_dev), rxq->rxq_len));
   9269 			continue;
   9270 		}
   9271 
   9272 		/*
    9273 		 * Okay, we have the entire packet now. The chip is
    9274 		 * configured to include the FCS except on I35[04] and
    9275 		 * I21[01] (not all chips can be configured to strip it),
    9276 		 * so we need to trim it. Those chips have an erratum: the
    9277 		 * RCTL_SECRC bit in the RCTL register is always set, so we
    9278 		 * don't trim it on them. PCH2 and newer chips also do not
    9279 		 * include the FCS when jumbo frames are used, to work
    9280 		 * around an erratum. We may need to adjust the length of the
    9281 		 * previous mbuf in the chain if the current mbuf is too short.
   9282 		 */
   9283 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9284 			if (m->m_len < ETHER_CRC_LEN) {
   9285 				rxq->rxq_tail->m_len
   9286 				    -= (ETHER_CRC_LEN - m->m_len);
   9287 				m->m_len = 0;
   9288 			} else
   9289 				m->m_len -= ETHER_CRC_LEN;
   9290 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9291 		} else
   9292 			len = rxq->rxq_len;
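         		/*
         		 * Trimming sketch for the non-stripping branch above:
         		 * with ETHER_CRC_LEN = 4, if the final mbuf holds only
         		 * 2 bytes, the previously linked mbuf is shortened by
         		 * the other 2 FCS bytes and the current mbuf's length
         		 * is set to 0.
         		 */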
   9293 
   9294 		WM_RXCHAIN_LINK(rxq, m);
   9295 
   9296 		*rxq->rxq_tailp = NULL;
   9297 		m = rxq->rxq_head;
   9298 
   9299 		WM_RXCHAIN_RESET(rxq);
   9300 
   9301 		DPRINTF(sc, WM_DEBUG_RX,
   9302 		    ("%s: RX: have entire packet, len -> %d\n",
   9303 			device_xname(sc->sc_dev), len));
   9304 
   9305 		/* If an error occurred, update stats and drop the packet. */
   9306 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9307 			m_freem(m);
   9308 			continue;
   9309 		}
   9310 
   9311 		/* No errors.  Receive the packet. */
   9312 		m_set_rcvif(m, ifp);
   9313 		m->m_pkthdr.len = len;
   9314 		/*
   9315 		 * TODO
    9316 		 * should save rsshash and rsstype in this mbuf.
   9317 		 */
   9318 		DPRINTF(sc, WM_DEBUG_RX,
   9319 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9320 			device_xname(sc->sc_dev), rsstype, rsshash));
   9321 
   9322 		/*
   9323 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9324 		 * for us.  Associate the tag with the packet.
   9325 		 */
   9326 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9327 			continue;
   9328 
   9329 		/* Set up checksum info for this packet. */
   9330 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9331 
   9332 		rxq->rxq_packets++;
   9333 		rxq->rxq_bytes += len;
   9334 		/* Pass it on. */
   9335 		if_percpuq_enqueue(sc->sc_ipq, m);
   9336 
   9337 		if (rxq->rxq_stopping)
   9338 			break;
   9339 	}
   9340 	rxq->rxq_ptr = i;
   9341 
   9342 	if (count != 0)
   9343 		rnd_add_uint32(&sc->rnd_source, count);
   9344 
   9345 	DPRINTF(sc, WM_DEBUG_RX,
   9346 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9347 
   9348 	return more;
   9349 }
   9350 
   9351 /*
   9352  * wm_linkintr_gmii:
   9353  *
   9354  *	Helper; handle link interrupts for GMII.
   9355  */
   9356 static void
   9357 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9358 {
   9359 	device_t dev = sc->sc_dev;
   9360 	uint32_t status, reg;
   9361 	bool link;
   9362 	int rv;
   9363 
   9364 	KASSERT(WM_CORE_LOCKED(sc));
   9365 
   9366 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9367 		__func__));
   9368 
   9369 	if ((icr & ICR_LSC) == 0) {
   9370 		if (icr & ICR_RXSEQ)
   9371 			DPRINTF(sc, WM_DEBUG_LINK,
   9372 			    ("%s: LINK Receive sequence error\n",
   9373 				device_xname(dev)));
   9374 		return;
   9375 	}
   9376 
   9377 	/* Link status changed */
   9378 	status = CSR_READ(sc, WMREG_STATUS);
   9379 	link = status & STATUS_LU;
   9380 	if (link) {
   9381 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9382 			device_xname(dev),
   9383 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9384 		if (wm_phy_need_linkdown_discard(sc))
   9385 			wm_clear_linkdown_discard(sc);
   9386 	} else {
   9387 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9388 			device_xname(dev)));
   9389 		if (wm_phy_need_linkdown_discard(sc))
   9390 			wm_set_linkdown_discard(sc);
   9391 	}
   9392 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9393 		wm_gig_downshift_workaround_ich8lan(sc);
   9394 
   9395 	if ((sc->sc_type == WM_T_ICH8)
   9396 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9397 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9398 	}
   9399 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9400 		device_xname(dev)));
   9401 	mii_pollstat(&sc->sc_mii);
   9402 	if (sc->sc_type == WM_T_82543) {
   9403 		int miistatus, active;
   9404 
   9405 		/*
   9406 		 * With 82543, we need to force speed and
   9407 		 * duplex on the MAC equal to what the PHY
   9408 		 * speed and duplex configuration is.
   9409 		 */
   9410 		miistatus = sc->sc_mii.mii_media_status;
   9411 
   9412 		if (miistatus & IFM_ACTIVE) {
   9413 			active = sc->sc_mii.mii_media_active;
   9414 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9415 			switch (IFM_SUBTYPE(active)) {
   9416 			case IFM_10_T:
   9417 				sc->sc_ctrl |= CTRL_SPEED_10;
   9418 				break;
   9419 			case IFM_100_TX:
   9420 				sc->sc_ctrl |= CTRL_SPEED_100;
   9421 				break;
   9422 			case IFM_1000_T:
   9423 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9424 				break;
   9425 			default:
   9426 				/*
   9427 				 * Fiber?
    9428 				 * Should not enter here.
   9429 				 */
   9430 				device_printf(dev, "unknown media (%x)\n",
   9431 				    active);
   9432 				break;
   9433 			}
   9434 			if (active & IFM_FDX)
   9435 				sc->sc_ctrl |= CTRL_FD;
   9436 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9437 		}
   9438 	} else if (sc->sc_type == WM_T_PCH) {
   9439 		wm_k1_gig_workaround_hv(sc,
   9440 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9441 	}
   9442 
   9443 	/*
   9444 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9445 	 * aggressive resulting in many collisions. To avoid this, increase
   9446 	 * the IPG and reduce Rx latency in the PHY.
   9447 	 */
   9448 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9449 	    && link) {
   9450 		uint32_t tipg_reg;
   9451 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9452 		bool fdx;
   9453 		uint16_t emi_addr, emi_val;
   9454 
   9455 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9456 		tipg_reg &= ~TIPG_IPGT_MASK;
   9457 		fdx = status & STATUS_FD;
   9458 
   9459 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9460 			tipg_reg |= 0xff;
   9461 			/* Reduce Rx latency in analog PHY */
   9462 			emi_val = 0;
   9463 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9464 		    fdx && speed != STATUS_SPEED_1000) {
   9465 			tipg_reg |= 0xc;
   9466 			emi_val = 1;
   9467 		} else {
    9468 			/* Fall back to the default values */
   9469 			tipg_reg |= 0x08;
   9470 			emi_val = 1;
   9471 		}
   9472 
   9473 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9474 
   9475 		rv = sc->phy.acquire(sc);
   9476 		if (rv)
   9477 			return;
   9478 
   9479 		if (sc->sc_type == WM_T_PCH2)
   9480 			emi_addr = I82579_RX_CONFIG;
   9481 		else
   9482 			emi_addr = I217_RX_CONFIG;
   9483 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9484 
   9485 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9486 			uint16_t phy_reg;
   9487 
   9488 			sc->phy.readreg_locked(dev, 2,
   9489 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9490 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9491 			if (speed == STATUS_SPEED_100
   9492 			    || speed == STATUS_SPEED_10)
   9493 				phy_reg |= 0x3e8;
   9494 			else
   9495 				phy_reg |= 0xfa;
   9496 			sc->phy.writereg_locked(dev, 2,
   9497 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9498 
   9499 			if (speed == STATUS_SPEED_1000) {
   9500 				sc->phy.readreg_locked(dev, 2,
   9501 				    HV_PM_CTRL, &phy_reg);
   9502 
   9503 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9504 
   9505 				sc->phy.writereg_locked(dev, 2,
   9506 				    HV_PM_CTRL, phy_reg);
   9507 			}
   9508 		}
   9509 		sc->phy.release(sc);
   9510 
   9511 		if (rv)
   9512 			return;
   9513 
   9514 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9515 			uint16_t data, ptr_gap;
   9516 
   9517 			if (speed == STATUS_SPEED_1000) {
   9518 				rv = sc->phy.acquire(sc);
   9519 				if (rv)
   9520 					return;
   9521 
   9522 				rv = sc->phy.readreg_locked(dev, 2,
   9523 				    I82579_UNKNOWN1, &data);
   9524 				if (rv) {
   9525 					sc->phy.release(sc);
   9526 					return;
   9527 				}
   9528 
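         				/*
         				 * The gap value is held in bits 11:2 of
         				 * this (undocumented, hence "UNKNOWN1")
         				 * register; enforce a minimum of 0x18
         				 * while linked at 1G.
         				 */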
   9529 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9530 				if (ptr_gap < 0x18) {
   9531 					data &= ~(0x3ff << 2);
   9532 					data |= (0x18 << 2);
   9533 					rv = sc->phy.writereg_locked(dev,
   9534 					    2, I82579_UNKNOWN1, data);
   9535 				}
   9536 				sc->phy.release(sc);
   9537 				if (rv)
   9538 					return;
   9539 			} else {
   9540 				rv = sc->phy.acquire(sc);
   9541 				if (rv)
   9542 					return;
   9543 
   9544 				rv = sc->phy.writereg_locked(dev, 2,
   9545 				    I82579_UNKNOWN1, 0xc023);
   9546 				sc->phy.release(sc);
   9547 				if (rv)
   9548 					return;
   9549 
   9550 			}
   9551 		}
   9552 	}
   9553 
   9554 	/*
   9555 	 * I217 Packet Loss issue:
    9556 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9557 	 * on power up.
    9558 	 * Set the Beacon Duration for I217 to 8 usec.
   9559 	 */
   9560 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9561 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9562 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9563 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9564 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9565 	}
   9566 
    9567 	/* Work around the I218 hang issue */
   9568 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9569 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9570 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9571 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9572 		wm_k1_workaround_lpt_lp(sc, link);
   9573 
   9574 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9575 		/*
   9576 		 * Set platform power management values for Latency
   9577 		 * Tolerance Reporting (LTR)
   9578 		 */
   9579 		wm_platform_pm_pch_lpt(sc,
   9580 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9581 	}
   9582 
   9583 	/* Clear link partner's EEE ability */
   9584 	sc->eee_lp_ability = 0;
   9585 
   9586 	/* FEXTNVM6 K1-off workaround */
   9587 	if (sc->sc_type == WM_T_PCH_SPT) {
   9588 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9589 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9590 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9591 		else
   9592 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9593 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9594 	}
   9595 
   9596 	if (!link)
   9597 		return;
   9598 
   9599 	switch (sc->sc_type) {
   9600 	case WM_T_PCH2:
   9601 		wm_k1_workaround_lv(sc);
   9602 		/* FALLTHROUGH */
   9603 	case WM_T_PCH:
   9604 		if (sc->sc_phytype == WMPHY_82578)
   9605 			wm_link_stall_workaround_hv(sc);
   9606 		break;
   9607 	default:
   9608 		break;
   9609 	}
   9610 
   9611 	/* Enable/Disable EEE after link up */
   9612 	if (sc->sc_phytype > WMPHY_82579)
   9613 		wm_set_eee_pchlan(sc);
   9614 }
   9615 
   9616 /*
   9617  * wm_linkintr_tbi:
   9618  *
   9619  *	Helper; handle link interrupts for TBI mode.
   9620  */
   9621 static void
   9622 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9623 {
   9624 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9625 	uint32_t status;
   9626 
   9627 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9628 		__func__));
   9629 
   9630 	status = CSR_READ(sc, WMREG_STATUS);
   9631 	if (icr & ICR_LSC) {
   9632 		wm_check_for_link(sc);
   9633 		if (status & STATUS_LU) {
   9634 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9635 				device_xname(sc->sc_dev),
   9636 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9637 			/*
   9638 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9639 			 * so we should update sc->sc_ctrl
   9640 			 */
   9641 
   9642 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9643 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9644 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9645 			if (status & STATUS_FD)
   9646 				sc->sc_tctl |=
   9647 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9648 			else
   9649 				sc->sc_tctl |=
   9650 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9651 			if (sc->sc_ctrl & CTRL_TFCE)
   9652 				sc->sc_fcrtl |= FCRTL_XONE;
   9653 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9654 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9655 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9656 			sc->sc_tbi_linkup = 1;
   9657 			if_link_state_change(ifp, LINK_STATE_UP);
   9658 		} else {
   9659 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9660 				device_xname(sc->sc_dev)));
   9661 			sc->sc_tbi_linkup = 0;
   9662 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9663 		}
   9664 		/* Update LED */
   9665 		wm_tbi_serdes_set_linkled(sc);
   9666 	} else if (icr & ICR_RXSEQ)
   9667 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9668 			device_xname(sc->sc_dev)));
   9669 }
   9670 
   9671 /*
   9672  * wm_linkintr_serdes:
   9673  *
    9674  *	Helper; handle link interrupts for SERDES mode.
   9675  */
   9676 static void
   9677 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9678 {
   9679 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9680 	struct mii_data *mii = &sc->sc_mii;
   9681 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9682 	uint32_t pcs_adv, pcs_lpab, reg;
   9683 
   9684 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9685 		__func__));
   9686 
   9687 	if (icr & ICR_LSC) {
   9688 		/* Check PCS */
   9689 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9690 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9691 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9692 				device_xname(sc->sc_dev)));
   9693 			mii->mii_media_status |= IFM_ACTIVE;
   9694 			sc->sc_tbi_linkup = 1;
   9695 			if_link_state_change(ifp, LINK_STATE_UP);
   9696 		} else {
   9697 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9698 				device_xname(sc->sc_dev)));
   9699 			mii->mii_media_status |= IFM_NONE;
   9700 			sc->sc_tbi_linkup = 0;
   9701 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9702 			wm_tbi_serdes_set_linkled(sc);
   9703 			return;
   9704 		}
   9705 		mii->mii_media_active |= IFM_1000_SX;
   9706 		if ((reg & PCS_LSTS_FDX) != 0)
   9707 			mii->mii_media_active |= IFM_FDX;
   9708 		else
   9709 			mii->mii_media_active |= IFM_HDX;
   9710 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9711 			/* Check flow */
   9712 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9713 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9714 				DPRINTF(sc, WM_DEBUG_LINK,
   9715 				    ("XXX LINKOK but not ACOMP\n"));
   9716 				return;
   9717 			}
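         			/*
         			 * Resolve flow control from our advertised
         			 * pause bits (pcs_adv) and the link partner's
         			 * (pcs_lpab), following the usual 802.3
         			 * symmetric/asymmetric pause resolution:
         			 *
         			 *	 adv SYM ASM | lpab SYM ASM | result
         			 *	       1  -  |        1  -  | Tx+Rx pause
         			 *	       0  1  |        1  1  | Tx pause only
         			 *	       1  1  |        0  1  | Rx pause only
         			 */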
   9718 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9719 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9720 			DPRINTF(sc, WM_DEBUG_LINK,
   9721 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9722 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9723 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9724 				mii->mii_media_active |= IFM_FLOW
   9725 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9726 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9727 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9728 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9729 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9730 				mii->mii_media_active |= IFM_FLOW
   9731 				    | IFM_ETH_TXPAUSE;
   9732 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9733 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9734 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9735 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9736 				mii->mii_media_active |= IFM_FLOW
   9737 				    | IFM_ETH_RXPAUSE;
   9738 		}
   9739 		/* Update LED */
   9740 		wm_tbi_serdes_set_linkled(sc);
   9741 	} else
   9742 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9743 		    device_xname(sc->sc_dev)));
   9744 }
   9745 
   9746 /*
   9747  * wm_linkintr:
   9748  *
   9749  *	Helper; handle link interrupts.
   9750  */
   9751 static void
   9752 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9753 {
   9754 
   9755 	KASSERT(WM_CORE_LOCKED(sc));
   9756 
   9757 	if (sc->sc_flags & WM_F_HAS_MII)
   9758 		wm_linkintr_gmii(sc, icr);
   9759 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9760 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9761 		wm_linkintr_serdes(sc, icr);
   9762 	else
   9763 		wm_linkintr_tbi(sc, icr);
   9764 }
   9765 
   9766 
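         /*
          * wm_sched_handle_queue:
          *
          *	Defer Tx/Rx processing for a queue to either the per-device
          *	workqueue or a softint, depending on wmq_txrx_use_workqueue.
          */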
   9767 static inline void
   9768 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9769 {
   9770 
   9771 	if (wmq->wmq_txrx_use_workqueue)
   9772 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9773 	else
   9774 		softint_schedule(wmq->wmq_si);
   9775 }
   9776 
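         /*
          * wm_legacy_intr_{disable,enable}:
          *
          *	Mask all interrupt causes, or unmask the driver's sc_icr set,
          *	for INTx/MSI operation.
          */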
   9777 static inline void
   9778 wm_legacy_intr_disable(struct wm_softc *sc)
   9779 {
   9780 
   9781 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   9782 }
   9783 
   9784 static inline void
   9785 wm_legacy_intr_enable(struct wm_softc *sc)
   9786 {
   9787 
   9788 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   9789 }
   9790 
   9791 /*
   9792  * wm_intr_legacy:
   9793  *
   9794  *	Interrupt service routine for INTx and MSI.
   9795  */
   9796 static int
   9797 wm_intr_legacy(void *arg)
   9798 {
   9799 	struct wm_softc *sc = arg;
   9800 	struct wm_queue *wmq = &sc->sc_queue[0];
   9801 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9802 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9803 	uint32_t icr, rndval = 0;
   9804 	int handled = 0;
   9805 	bool more = false;
   9806 
   9807 	while (1 /* CONSTCOND */) {
   9808 		icr = CSR_READ(sc, WMREG_ICR);
   9809 		if ((icr & sc->sc_icr) == 0)
   9810 			break;
   9811 		if (handled == 0)
   9812 			DPRINTF(sc, WM_DEBUG_TX,
    9813 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9814 		if (rndval == 0)
   9815 			rndval = icr;
   9816 
   9817 		mutex_enter(rxq->rxq_lock);
   9818 
   9819 		if (rxq->rxq_stopping) {
   9820 			mutex_exit(rxq->rxq_lock);
   9821 			break;
   9822 		}
   9823 
   9824 		handled = 1;
   9825 
   9826 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9827 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9828 			DPRINTF(sc, WM_DEBUG_RX,
   9829 			    ("%s: RX: got Rx intr 0x%08x\n",
   9830 				device_xname(sc->sc_dev),
   9831 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9832 			WM_Q_EVCNT_INCR(rxq, intr);
   9833 		}
   9834 #endif
   9835 		/*
   9836 		 * wm_rxeof() does *not* call upper layer functions directly,
    9837 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9838 		 * So, we can call wm_rxeof() in interrupt context.
   9839 		 */
   9840 		more = wm_rxeof(rxq, UINT_MAX);
   9841 
   9842 		mutex_exit(rxq->rxq_lock);
   9843 		mutex_enter(txq->txq_lock);
   9844 
   9845 		if (txq->txq_stopping) {
   9846 			mutex_exit(txq->txq_lock);
   9847 			break;
   9848 		}
   9849 
   9850 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9851 		if (icr & ICR_TXDW) {
   9852 			DPRINTF(sc, WM_DEBUG_TX,
   9853 			    ("%s: TX: got TXDW interrupt\n",
   9854 				device_xname(sc->sc_dev)));
   9855 			WM_Q_EVCNT_INCR(txq, txdw);
   9856 		}
   9857 #endif
   9858 		more |= wm_txeof(txq, UINT_MAX);
   9859 
   9860 		mutex_exit(txq->txq_lock);
   9861 		WM_CORE_LOCK(sc);
   9862 
   9863 		if (sc->sc_core_stopping) {
   9864 			WM_CORE_UNLOCK(sc);
   9865 			break;
   9866 		}
   9867 
   9868 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9869 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9870 			wm_linkintr(sc, icr);
   9871 		}
   9872 		if ((icr & ICR_GPI(0)) != 0)
   9873 			device_printf(sc->sc_dev, "got module interrupt\n");
   9874 
   9875 		WM_CORE_UNLOCK(sc);
   9876 
   9877 		if (icr & ICR_RXO) {
   9878 #if defined(WM_DEBUG)
   9879 			log(LOG_WARNING, "%s: Receive overrun\n",
   9880 			    device_xname(sc->sc_dev));
   9881 #endif /* defined(WM_DEBUG) */
   9882 		}
   9883 	}
   9884 
   9885 	rnd_add_uint32(&sc->rnd_source, rndval);
   9886 
   9887 	if (more) {
   9888 		/* Try to get more packets going. */
   9889 		wm_legacy_intr_disable(sc);
   9890 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9891 		wm_sched_handle_queue(sc, wmq);
   9892 	}
   9893 
   9894 	return handled;
   9895 }
   9896 
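         /*
          * wm_txrxintr_disable:
          *
          *	Mask this queue's Tx/Rx interrupts. The mask register differs
          *	by MAC type: 82574 uses per-queue ICR bits in IMC, 82575 uses
          *	EITR queue bits in EIMC, and the other MSI-X devices use one
          *	EIMC bit per vector.
          */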
   9897 static inline void
   9898 wm_txrxintr_disable(struct wm_queue *wmq)
   9899 {
   9900 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9901 
   9902 	if (__predict_false(!wm_is_using_msix(sc))) {
   9903 		return wm_legacy_intr_disable(sc);
   9904 	}
   9905 
   9906 	if (sc->sc_type == WM_T_82574)
   9907 		CSR_WRITE(sc, WMREG_IMC,
   9908 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9909 	else if (sc->sc_type == WM_T_82575)
   9910 		CSR_WRITE(sc, WMREG_EIMC,
   9911 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9912 	else
   9913 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9914 }
   9915 
   9916 static inline void
   9917 wm_txrxintr_enable(struct wm_queue *wmq)
   9918 {
   9919 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9920 
   9921 	wm_itrs_calculate(sc, wmq);
   9922 
   9923 	if (__predict_false(!wm_is_using_msix(sc))) {
   9924 		return wm_legacy_intr_enable(sc);
   9925 	}
   9926 
   9927 	/*
    9928 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
    9929 	 * It does not matter whether RXQ(0) or RXQ(1) enables ICR_OTHER
    9930 	 * first, because each RXQ/TXQ interrupt is disabled while its
    9931 	 * wm_handle_queue(wmq) is running.
   9932 	 */
   9933 	if (sc->sc_type == WM_T_82574)
   9934 		CSR_WRITE(sc, WMREG_IMS,
   9935 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9936 	else if (sc->sc_type == WM_T_82575)
   9937 		CSR_WRITE(sc, WMREG_EIMS,
   9938 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9939 	else
   9940 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9941 }
   9942 
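         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for one Tx/Rx queue pair in MSI-X
          *	mode. The queue's interrupts stay masked while processing; if
          *	the process limits were reached, the remainder is deferred to
          *	wm_handle_queue(), otherwise the interrupts are re-enabled.
          */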
   9943 static int
   9944 wm_txrxintr_msix(void *arg)
   9945 {
   9946 	struct wm_queue *wmq = arg;
   9947 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9948 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9949 	struct wm_softc *sc = txq->txq_sc;
   9950 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9951 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9952 	bool txmore;
   9953 	bool rxmore;
   9954 
   9955 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9956 
   9957 	DPRINTF(sc, WM_DEBUG_TX,
   9958 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9959 
   9960 	wm_txrxintr_disable(wmq);
   9961 
   9962 	mutex_enter(txq->txq_lock);
   9963 
   9964 	if (txq->txq_stopping) {
   9965 		mutex_exit(txq->txq_lock);
   9966 		return 0;
   9967 	}
   9968 
   9969 	WM_Q_EVCNT_INCR(txq, txdw);
   9970 	txmore = wm_txeof(txq, txlimit);
   9971 	/* wm_deferred start() is done in wm_handle_queue(). */
   9972 	mutex_exit(txq->txq_lock);
   9973 
   9974 	DPRINTF(sc, WM_DEBUG_RX,
   9975 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9976 	mutex_enter(rxq->rxq_lock);
   9977 
   9978 	if (rxq->rxq_stopping) {
   9979 		mutex_exit(rxq->rxq_lock);
   9980 		return 0;
   9981 	}
   9982 
   9983 	WM_Q_EVCNT_INCR(rxq, intr);
   9984 	rxmore = wm_rxeof(rxq, rxlimit);
   9985 	mutex_exit(rxq->rxq_lock);
   9986 
   9987 	wm_itrs_writereg(sc, wmq);
   9988 
   9989 	if (txmore || rxmore) {
   9990 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9991 		wm_sched_handle_queue(sc, wmq);
   9992 	} else
   9993 		wm_txrxintr_enable(wmq);
   9994 
   9995 	return 1;
   9996 }
   9997 
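         /*
          * wm_handle_queue:
          *
          *	Deferred Tx/Rx processing, run in softint or workqueue
          *	context. Reap completed transmissions, kick deferred starts,
          *	receive packets, and either reschedule itself or re-enable
          *	the queue's interrupts.
          */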
   9998 static void
   9999 wm_handle_queue(void *arg)
   10000 {
   10001 	struct wm_queue *wmq = arg;
   10002 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10003 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10004 	struct wm_softc *sc = txq->txq_sc;
   10005 	u_int txlimit = sc->sc_tx_process_limit;
   10006 	u_int rxlimit = sc->sc_rx_process_limit;
   10007 	bool txmore;
   10008 	bool rxmore;
   10009 
   10010 	mutex_enter(txq->txq_lock);
   10011 	if (txq->txq_stopping) {
   10012 		mutex_exit(txq->txq_lock);
   10013 		return;
   10014 	}
   10015 	txmore = wm_txeof(txq, txlimit);
   10016 	wm_deferred_start_locked(txq);
   10017 	mutex_exit(txq->txq_lock);
   10018 
   10019 	mutex_enter(rxq->rxq_lock);
   10020 	if (rxq->rxq_stopping) {
   10021 		mutex_exit(rxq->rxq_lock);
   10022 		return;
   10023 	}
   10024 	WM_Q_EVCNT_INCR(rxq, defer);
   10025 	rxmore = wm_rxeof(rxq, rxlimit);
   10026 	mutex_exit(rxq->rxq_lock);
   10027 
   10028 	if (txmore || rxmore) {
   10029 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10030 		wm_sched_handle_queue(sc, wmq);
   10031 	} else
   10032 		wm_txrxintr_enable(wmq);
   10033 }
   10034 
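          /*
           * wm_handle_queue_work:
           *
           *	Workqueue wrapper around wm_handle_queue().
           */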
   10035 static void
   10036 wm_handle_queue_work(struct work *wk, void *context)
   10037 {
   10038 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10039 
    10040 	/* An "enqueued" flag is not required here. */
   10043 	wm_handle_queue(wmq);
   10044 }
   10045 
   10046 /*
   10047  * wm_linkintr_msix:
   10048  *
   10049  *	Interrupt service routine for link status change for MSI-X.
   10050  */
   10051 static int
   10052 wm_linkintr_msix(void *arg)
   10053 {
   10054 	struct wm_softc *sc = arg;
   10055 	uint32_t reg;
   10056 	bool has_rxo;
   10057 
   10058 	reg = CSR_READ(sc, WMREG_ICR);
   10059 	WM_CORE_LOCK(sc);
   10060 	DPRINTF(sc, WM_DEBUG_LINK,
   10061 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10062 		device_xname(sc->sc_dev), reg));
   10063 
   10064 	if (sc->sc_core_stopping)
   10065 		goto out;
   10066 
   10067 	if ((reg & ICR_LSC) != 0) {
   10068 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10069 		wm_linkintr(sc, ICR_LSC);
   10070 	}
   10071 	if ((reg & ICR_GPI(0)) != 0)
   10072 		device_printf(sc->sc_dev, "got module interrupt\n");
   10073 
   10074 	/*
   10075 	 * XXX 82574 MSI-X mode workaround
   10076 	 *
    10077 	 * In 82574 MSI-X mode, the receive overrun (RXO) interrupt arrives on
    10078 	 * the ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
    10079 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
    10080 	 * interrupts by writing WMREG_ICS to process receive packets.
   10081 	 */
   10082 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10083 #if defined(WM_DEBUG)
   10084 		log(LOG_WARNING, "%s: Receive overrun\n",
   10085 		    device_xname(sc->sc_dev));
   10086 #endif /* defined(WM_DEBUG) */
   10087 
   10088 		has_rxo = true;
   10089 		/*
    10090 		 * The RXO interrupt fires at a very high rate when receive
    10091 		 * traffic is heavy, so use polling mode for ICR_OTHER just as
    10092 		 * for the Tx/Rx interrupts. ICR_OTHER will be re-enabled at
    10093 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    10094 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   10095 		 */
   10096 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10097 
   10098 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10099 	}
   10100 
    10101 
   10103 out:
   10104 	WM_CORE_UNLOCK(sc);
   10105 
   10106 	if (sc->sc_type == WM_T_82574) {
   10107 		if (!has_rxo)
   10108 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10109 		else
   10110 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10111 	} else if (sc->sc_type == WM_T_82575)
   10112 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10113 	else
   10114 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10115 
   10116 	return 1;
   10117 }
   10118 
   10119 /*
   10120  * Media related.
   10121  * GMII, SGMII, TBI (and SERDES)
   10122  */
   10123 
   10124 /* Common */
   10125 
   10126 /*
   10127  * wm_tbi_serdes_set_linkled:
   10128  *
   10129  *	Update the link LED on TBI and SERDES devices.
   10130  */
   10131 static void
   10132 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10133 {
   10134 
   10135 	if (sc->sc_tbi_linkup)
   10136 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10137 	else
   10138 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10139 
   10140 	/* 82540 or newer devices are active low */
   10141 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10142 
   10143 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10144 }
   10145 
   10146 /* GMII related */
   10147 
   10148 /*
   10149  * wm_gmii_reset:
   10150  *
   10151  *	Reset the PHY.
   10152  */
   10153 static void
   10154 wm_gmii_reset(struct wm_softc *sc)
   10155 {
   10156 	uint32_t reg;
   10157 	int rv;
   10158 
   10159 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10160 		device_xname(sc->sc_dev), __func__));
   10161 
   10162 	rv = sc->phy.acquire(sc);
   10163 	if (rv != 0) {
   10164 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10165 		    __func__);
   10166 		return;
   10167 	}
   10168 
   10169 	switch (sc->sc_type) {
   10170 	case WM_T_82542_2_0:
   10171 	case WM_T_82542_2_1:
   10172 		/* null */
   10173 		break;
   10174 	case WM_T_82543:
   10175 		/*
   10176 		 * With 82543, we need to force speed and duplex on the MAC
   10177 		 * equal to what the PHY speed and duplex configuration is.
   10178 		 * In addition, we need to perform a hardware reset on the PHY
   10179 		 * to take it out of reset.
   10180 		 */
   10181 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10182 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10183 
   10184 		/* The PHY reset pin is active-low. */
   10185 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10186 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10187 		    CTRL_EXT_SWDPIN(4));
   10188 		reg |= CTRL_EXT_SWDPIO(4);
   10189 
   10190 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10191 		CSR_WRITE_FLUSH(sc);
   10192 		delay(10*1000);
   10193 
   10194 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10195 		CSR_WRITE_FLUSH(sc);
   10196 		delay(150);
   10197 #if 0
   10198 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10199 #endif
   10200 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10201 		break;
   10202 	case WM_T_82544:	/* Reset 10000us */
   10203 	case WM_T_82540:
   10204 	case WM_T_82545:
   10205 	case WM_T_82545_3:
   10206 	case WM_T_82546:
   10207 	case WM_T_82546_3:
   10208 	case WM_T_82541:
   10209 	case WM_T_82541_2:
   10210 	case WM_T_82547:
   10211 	case WM_T_82547_2:
   10212 	case WM_T_82571:	/* Reset 100us */
   10213 	case WM_T_82572:
   10214 	case WM_T_82573:
   10215 	case WM_T_82574:
   10216 	case WM_T_82575:
   10217 	case WM_T_82576:
   10218 	case WM_T_82580:
   10219 	case WM_T_I350:
   10220 	case WM_T_I354:
   10221 	case WM_T_I210:
   10222 	case WM_T_I211:
   10223 	case WM_T_82583:
   10224 	case WM_T_80003:
   10225 		/* Generic reset */
   10226 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10227 		CSR_WRITE_FLUSH(sc);
   10228 		delay(20000);
   10229 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10230 		CSR_WRITE_FLUSH(sc);
   10231 		delay(20000);
   10232 
   10233 		if ((sc->sc_type == WM_T_82541)
   10234 		    || (sc->sc_type == WM_T_82541_2)
   10235 		    || (sc->sc_type == WM_T_82547)
   10236 		    || (sc->sc_type == WM_T_82547_2)) {
    10237 			/* Workarounds for IGP are done in igp_reset() */
   10238 			/* XXX add code to set LED after phy reset */
   10239 		}
   10240 		break;
   10241 	case WM_T_ICH8:
   10242 	case WM_T_ICH9:
   10243 	case WM_T_ICH10:
   10244 	case WM_T_PCH:
   10245 	case WM_T_PCH2:
   10246 	case WM_T_PCH_LPT:
   10247 	case WM_T_PCH_SPT:
   10248 	case WM_T_PCH_CNP:
   10249 		/* Generic reset */
   10250 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10251 		CSR_WRITE_FLUSH(sc);
   10252 		delay(100);
   10253 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10254 		CSR_WRITE_FLUSH(sc);
   10255 		delay(150);
   10256 		break;
   10257 	default:
   10258 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10259 		    __func__);
   10260 		break;
   10261 	}
   10262 
   10263 	sc->phy.release(sc);
   10264 
   10265 	/* get_cfg_done */
   10266 	wm_get_cfg_done(sc);
   10267 
   10268 	/* Extra setup */
   10269 	switch (sc->sc_type) {
   10270 	case WM_T_82542_2_0:
   10271 	case WM_T_82542_2_1:
   10272 	case WM_T_82543:
   10273 	case WM_T_82544:
   10274 	case WM_T_82540:
   10275 	case WM_T_82545:
   10276 	case WM_T_82545_3:
   10277 	case WM_T_82546:
   10278 	case WM_T_82546_3:
   10279 	case WM_T_82541_2:
   10280 	case WM_T_82547_2:
   10281 	case WM_T_82571:
   10282 	case WM_T_82572:
   10283 	case WM_T_82573:
   10284 	case WM_T_82574:
   10285 	case WM_T_82583:
   10286 	case WM_T_82575:
   10287 	case WM_T_82576:
   10288 	case WM_T_82580:
   10289 	case WM_T_I350:
   10290 	case WM_T_I354:
   10291 	case WM_T_I210:
   10292 	case WM_T_I211:
   10293 	case WM_T_80003:
   10294 		/* Null */
   10295 		break;
   10296 	case WM_T_82541:
   10297 	case WM_T_82547:
    10298 		/* XXX Configure the activity LED after PHY reset */
   10299 		break;
   10300 	case WM_T_ICH8:
   10301 	case WM_T_ICH9:
   10302 	case WM_T_ICH10:
   10303 	case WM_T_PCH:
   10304 	case WM_T_PCH2:
   10305 	case WM_T_PCH_LPT:
   10306 	case WM_T_PCH_SPT:
   10307 	case WM_T_PCH_CNP:
   10308 		wm_phy_post_reset(sc);
   10309 		break;
   10310 	default:
   10311 		panic("%s: unknown type\n", __func__);
   10312 		break;
   10313 	}
   10314 }
   10315 
   10316 /*
   10317  * Setup sc_phytype and mii_{read|write}reg.
   10318  *
    10319  *  To identify the PHY type, the correct read/write functions must be
    10320  * selected, and to select them the PCI ID or MAC type is required,
    10321  * without accessing any PHY registers.
    10322  *
    10323  *  On the first call of this function, the PHY ID is not known yet.
    10324  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    10325  * so the result might be incorrect.
    10326  *
    10327  *  On the second call, the PHY OUI and model are used to identify the
    10328  * PHY type. This might still not be perfect due to missing comparison
    10329  * entries, but it is better than the first call.
    10330  *
    10331  *  If the newly detected result differs from the previous assumption,
    10332  * a diagnostic message is printed.
   10333  */
   10334 static void
   10335 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10336     uint16_t phy_model)
   10337 {
   10338 	device_t dev = sc->sc_dev;
   10339 	struct mii_data *mii = &sc->sc_mii;
   10340 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10341 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10342 	mii_readreg_t new_readreg;
   10343 	mii_writereg_t new_writereg;
   10344 	bool dodiag = true;
   10345 
   10346 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10347 		device_xname(sc->sc_dev), __func__));
   10348 
   10349 	/*
    10350 	 * A 1000BASE-T SFP uses SGMII, so the first assumed PHY type is
    10351 	 * always incorrect. Don't print diagnostic output on the 2nd call.
   10352 	 */
   10353 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10354 		dodiag = false;
   10355 
   10356 	if (mii->mii_readreg == NULL) {
   10357 		/*
   10358 		 *  This is the first call of this function. For ICH and PCH
   10359 		 * variants, it's difficult to determine the PHY access method
   10360 		 * by sc_type, so use the PCI product ID for some devices.
   10361 		 */
   10362 
   10363 		switch (sc->sc_pcidevid) {
   10364 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10365 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10366 			/* 82577 */
   10367 			new_phytype = WMPHY_82577;
   10368 			break;
   10369 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10370 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10371 			/* 82578 */
   10372 			new_phytype = WMPHY_82578;
   10373 			break;
   10374 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10375 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10376 			/* 82579 */
   10377 			new_phytype = WMPHY_82579;
   10378 			break;
   10379 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10380 		case PCI_PRODUCT_INTEL_82801I_BM:
   10381 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10382 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10383 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10384 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10385 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10386 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10387 			/* ICH8, 9, 10 with 82567 */
   10388 			new_phytype = WMPHY_BM;
   10389 			break;
   10390 		default:
   10391 			break;
   10392 		}
   10393 	} else {
   10394 		/* It's not the first call. Use PHY OUI and model */
   10395 		switch (phy_oui) {
   10396 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10397 			switch (phy_model) {
   10398 			case 0x0004: /* XXX */
   10399 				new_phytype = WMPHY_82578;
   10400 				break;
   10401 			default:
   10402 				break;
   10403 			}
   10404 			break;
   10405 		case MII_OUI_xxMARVELL:
   10406 			switch (phy_model) {
   10407 			case MII_MODEL_xxMARVELL_I210:
   10408 				new_phytype = WMPHY_I210;
   10409 				break;
   10410 			case MII_MODEL_xxMARVELL_E1011:
   10411 			case MII_MODEL_xxMARVELL_E1000_3:
   10412 			case MII_MODEL_xxMARVELL_E1000_5:
   10413 			case MII_MODEL_xxMARVELL_E1112:
   10414 				new_phytype = WMPHY_M88;
   10415 				break;
   10416 			case MII_MODEL_xxMARVELL_E1149:
   10417 				new_phytype = WMPHY_BM;
   10418 				break;
   10419 			case MII_MODEL_xxMARVELL_E1111:
   10420 			case MII_MODEL_xxMARVELL_I347:
   10421 			case MII_MODEL_xxMARVELL_E1512:
   10422 			case MII_MODEL_xxMARVELL_E1340M:
   10423 			case MII_MODEL_xxMARVELL_E1543:
   10424 				new_phytype = WMPHY_M88;
   10425 				break;
   10426 			case MII_MODEL_xxMARVELL_I82563:
   10427 				new_phytype = WMPHY_GG82563;
   10428 				break;
   10429 			default:
   10430 				break;
   10431 			}
   10432 			break;
   10433 		case MII_OUI_INTEL:
   10434 			switch (phy_model) {
   10435 			case MII_MODEL_INTEL_I82577:
   10436 				new_phytype = WMPHY_82577;
   10437 				break;
   10438 			case MII_MODEL_INTEL_I82579:
   10439 				new_phytype = WMPHY_82579;
   10440 				break;
   10441 			case MII_MODEL_INTEL_I217:
   10442 				new_phytype = WMPHY_I217;
   10443 				break;
   10444 			case MII_MODEL_INTEL_I82580:
   10445 				new_phytype = WMPHY_82580;
   10446 				break;
   10447 			case MII_MODEL_INTEL_I350:
   10448 				new_phytype = WMPHY_I350;
    10449 				break;
   10451 			default:
   10452 				break;
   10453 			}
   10454 			break;
   10455 		case MII_OUI_yyINTEL:
   10456 			switch (phy_model) {
   10457 			case MII_MODEL_yyINTEL_I82562G:
   10458 			case MII_MODEL_yyINTEL_I82562EM:
   10459 			case MII_MODEL_yyINTEL_I82562ET:
   10460 				new_phytype = WMPHY_IFE;
   10461 				break;
   10462 			case MII_MODEL_yyINTEL_IGP01E1000:
   10463 				new_phytype = WMPHY_IGP;
   10464 				break;
   10465 			case MII_MODEL_yyINTEL_I82566:
   10466 				new_phytype = WMPHY_IGP_3;
   10467 				break;
   10468 			default:
   10469 				break;
   10470 			}
   10471 			break;
   10472 		default:
   10473 			break;
   10474 		}
   10475 
   10476 		if (dodiag) {
   10477 			if (new_phytype == WMPHY_UNKNOWN)
   10478 				aprint_verbose_dev(dev,
   10479 				    "%s: Unknown PHY model. OUI=%06x, "
   10480 				    "model=%04x\n", __func__, phy_oui,
   10481 				    phy_model);
   10482 
   10483 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10484 			    && (sc->sc_phytype != new_phytype)) {
   10485 				aprint_error_dev(dev, "Previously assumed PHY "
    10486 				    "type(%u) was incorrect. PHY type from "
    10487 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   10488 			}
   10489 		}
   10490 	}
   10491 
   10492 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10493 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10494 		/* SGMII */
   10495 		new_readreg = wm_sgmii_readreg;
   10496 		new_writereg = wm_sgmii_writereg;
   10497 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10498 		/* BM2 (phyaddr == 1) */
   10499 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10500 		    && (new_phytype != WMPHY_BM)
   10501 		    && (new_phytype != WMPHY_UNKNOWN))
   10502 			doubt_phytype = new_phytype;
   10503 		new_phytype = WMPHY_BM;
   10504 		new_readreg = wm_gmii_bm_readreg;
   10505 		new_writereg = wm_gmii_bm_writereg;
   10506 	} else if (sc->sc_type >= WM_T_PCH) {
   10507 		/* All PCH* use _hv_ */
   10508 		new_readreg = wm_gmii_hv_readreg;
   10509 		new_writereg = wm_gmii_hv_writereg;
   10510 	} else if (sc->sc_type >= WM_T_ICH8) {
   10511 		/* non-82567 ICH8, 9 and 10 */
   10512 		new_readreg = wm_gmii_i82544_readreg;
   10513 		new_writereg = wm_gmii_i82544_writereg;
   10514 	} else if (sc->sc_type >= WM_T_80003) {
   10515 		/* 80003 */
   10516 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10517 		    && (new_phytype != WMPHY_GG82563)
   10518 		    && (new_phytype != WMPHY_UNKNOWN))
   10519 			doubt_phytype = new_phytype;
   10520 		new_phytype = WMPHY_GG82563;
   10521 		new_readreg = wm_gmii_i80003_readreg;
   10522 		new_writereg = wm_gmii_i80003_writereg;
   10523 	} else if (sc->sc_type >= WM_T_I210) {
   10524 		/* I210 and I211 */
   10525 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10526 		    && (new_phytype != WMPHY_I210)
   10527 		    && (new_phytype != WMPHY_UNKNOWN))
   10528 			doubt_phytype = new_phytype;
   10529 		new_phytype = WMPHY_I210;
   10530 		new_readreg = wm_gmii_gs40g_readreg;
   10531 		new_writereg = wm_gmii_gs40g_writereg;
   10532 	} else if (sc->sc_type >= WM_T_82580) {
   10533 		/* 82580, I350 and I354 */
   10534 		new_readreg = wm_gmii_82580_readreg;
   10535 		new_writereg = wm_gmii_82580_writereg;
   10536 	} else if (sc->sc_type >= WM_T_82544) {
    10537 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10538 		new_readreg = wm_gmii_i82544_readreg;
   10539 		new_writereg = wm_gmii_i82544_writereg;
   10540 	} else {
   10541 		new_readreg = wm_gmii_i82543_readreg;
   10542 		new_writereg = wm_gmii_i82543_writereg;
   10543 	}
   10544 
   10545 	if (new_phytype == WMPHY_BM) {
   10546 		/* All BM use _bm_ */
   10547 		new_readreg = wm_gmii_bm_readreg;
   10548 		new_writereg = wm_gmii_bm_writereg;
   10549 	}
   10550 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10551 		/* All PCH* use _hv_ */
   10552 		new_readreg = wm_gmii_hv_readreg;
   10553 		new_writereg = wm_gmii_hv_writereg;
   10554 	}
   10555 
   10556 	/* Diag output */
   10557 	if (dodiag) {
   10558 		if (doubt_phytype != WMPHY_UNKNOWN)
   10559 			aprint_error_dev(dev, "Assumed new PHY type was "
   10560 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10561 			    new_phytype);
   10562 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10563 		    && (sc->sc_phytype != new_phytype))
    10564 			aprint_error_dev(dev, "Previously assumed PHY type"
    10565 			    "(%u) was incorrect. New PHY type = %u\n",
   10566 			    sc->sc_phytype, new_phytype);
   10567 
   10568 		if ((mii->mii_readreg != NULL) &&
   10569 		    (new_phytype == WMPHY_UNKNOWN))
   10570 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10571 
   10572 		if ((mii->mii_readreg != NULL) &&
   10573 		    (mii->mii_readreg != new_readreg))
   10574 			aprint_error_dev(dev, "Previously assumed PHY "
   10575 			    "read/write function was incorrect.\n");
   10576 	}
   10577 
   10578 	/* Update now */
   10579 	sc->sc_phytype = new_phytype;
   10580 	mii->mii_readreg = new_readreg;
   10581 	mii->mii_writereg = new_writereg;
   10582 	if (new_readreg == wm_gmii_hv_readreg) {
   10583 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10584 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10585 	} else if (new_readreg == wm_sgmii_readreg) {
   10586 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10587 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10588 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10589 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10590 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10591 	}
   10592 }
   10593 
   10594 /*
   10595  * wm_get_phy_id_82575:
   10596  *
    10597  * Return the PHY ID, or -1 if it failed.
   10598  */
   10599 static int
   10600 wm_get_phy_id_82575(struct wm_softc *sc)
   10601 {
   10602 	uint32_t reg;
   10603 	int phyid = -1;
   10604 
   10605 	/* XXX */
   10606 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10607 		return -1;
   10608 
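          	/*
          	 * On SGMII devices that use MDIO, the PHY address is latched
          	 * in MDIC (82575/82576) or MDICNFG (82580 and newer).
          	 */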
   10609 	if (wm_sgmii_uses_mdio(sc)) {
   10610 		switch (sc->sc_type) {
   10611 		case WM_T_82575:
   10612 		case WM_T_82576:
   10613 			reg = CSR_READ(sc, WMREG_MDIC);
   10614 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10615 			break;
   10616 		case WM_T_82580:
   10617 		case WM_T_I350:
   10618 		case WM_T_I354:
   10619 		case WM_T_I210:
   10620 		case WM_T_I211:
   10621 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10622 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10623 			break;
   10624 		default:
   10625 			return -1;
   10626 		}
   10627 	}
   10628 
   10629 	return phyid;
   10630 }
   10631 
   10632 /*
   10633  * wm_gmii_mediainit:
   10634  *
   10635  *	Initialize media for use on 1000BASE-T devices.
   10636  */
   10637 static void
   10638 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10639 {
   10640 	device_t dev = sc->sc_dev;
   10641 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10642 	struct mii_data *mii = &sc->sc_mii;
   10643 
   10644 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10645 		device_xname(sc->sc_dev), __func__));
   10646 
   10647 	/* We have GMII. */
   10648 	sc->sc_flags |= WM_F_HAS_MII;
   10649 
   10650 	if (sc->sc_type == WM_T_80003)
    10651 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10652 	else
   10653 		sc->sc_tipg = TIPG_1000T_DFLT;
   10654 
   10655 	/*
   10656 	 * Let the chip set speed/duplex on its own based on
   10657 	 * signals from the PHY.
   10658 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10659 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10660 	 */
   10661 	sc->sc_ctrl |= CTRL_SLU;
   10662 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10663 
   10664 	/* Initialize our media structures and probe the GMII. */
   10665 	mii->mii_ifp = ifp;
   10666 
   10667 	mii->mii_statchg = wm_gmii_statchg;
   10668 
    10669 	/* Get PHY control from SMBus to PCIe */
   10670 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10671 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10672 	    || (sc->sc_type == WM_T_PCH_CNP))
   10673 		wm_init_phy_workarounds_pchlan(sc);
   10674 
   10675 	wm_gmii_reset(sc);
   10676 
   10677 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10678 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10679 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10680 
   10681 	/* Setup internal SGMII PHY for SFP */
   10682 	wm_sgmii_sfp_preconfig(sc);
   10683 
   10684 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10685 	    || (sc->sc_type == WM_T_82580)
   10686 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10687 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10688 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10689 			/* Attach only one port */
   10690 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10691 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10692 		} else {
   10693 			int i, id;
   10694 			uint32_t ctrl_ext;
   10695 
   10696 			id = wm_get_phy_id_82575(sc);
   10697 			if (id != -1) {
   10698 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10699 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10700 			}
   10701 			if ((id == -1)
   10702 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10703 				/* Power on the SGMII PHY if it is disabled */
   10704 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10705 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10706 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10707 				CSR_WRITE_FLUSH(sc);
   10708 				delay(300*1000); /* XXX too long */
   10709 
   10710 				/*
    10711 				 * Scan PHY addresses 1 through 7.
    10712 				 *
    10713 				 * I2C access can fail with the I2C register's
    10714 				 * ERROR bit set, so suppress error messages
    10715 				 * while scanning.
   10716 				 */
   10717 				sc->phy.no_errprint = true;
   10718 				for (i = 1; i < 8; i++)
   10719 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10720 					    0xffffffff, i, MII_OFFSET_ANY,
   10721 					    MIIF_DOPAUSE);
   10722 				sc->phy.no_errprint = false;
   10723 
   10724 				/* Restore previous sfp cage power state */
   10725 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10726 			}
   10727 		}
   10728 	} else
   10729 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10730 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10731 
   10732 	/*
    10733  * If the MAC is PCH2, PCH_LPT, PCH_SPT or PCH_CNP and it failed to detect
    10734  * the MII PHY, call wm_set_mdio_slow_mode_hv() as a workaround and retry.
   10735 	 */
   10736 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10737 		|| (sc->sc_type == WM_T_PCH_SPT)
   10738 		|| (sc->sc_type == WM_T_PCH_CNP))
   10739 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10740 		wm_set_mdio_slow_mode_hv(sc);
   10741 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10742 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10743 	}
   10744 
   10745 	/*
   10746 	 * (For ICH8 variants)
   10747 	 * If PHY detection failed, use BM's r/w function and retry.
   10748 	 */
   10749 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10750 		/* if failed, retry with *_bm_* */
   10751 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10752 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10753 		    sc->sc_phytype);
   10754 		sc->sc_phytype = WMPHY_BM;
   10755 		mii->mii_readreg = wm_gmii_bm_readreg;
   10756 		mii->mii_writereg = wm_gmii_bm_writereg;
   10757 
   10758 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10759 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10760 	}
   10761 
   10762 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10763 		/* No PHY was found */
   10764 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10765 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10766 		sc->sc_phytype = WMPHY_NONE;
   10767 	} else {
   10768 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10769 
   10770 		/*
    10771 		 * A PHY was found. Check the PHY type again with the second
    10772 		 * call of wm_gmii_setup_phytype().
   10773 		 */
   10774 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10775 		    child->mii_mpd_model);
   10776 
   10777 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10778 	}
   10779 }
   10780 
   10781 /*
   10782  * wm_gmii_mediachange:	[ifmedia interface function]
   10783  *
   10784  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10785  */
   10786 static int
   10787 wm_gmii_mediachange(struct ifnet *ifp)
   10788 {
   10789 	struct wm_softc *sc = ifp->if_softc;
   10790 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10791 	uint32_t reg;
   10792 	int rc;
   10793 
   10794 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10795 		device_xname(sc->sc_dev), __func__));
   10796 	if ((ifp->if_flags & IFF_UP) == 0)
   10797 		return 0;
   10798 
   10799 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10800 	if ((sc->sc_type == WM_T_82580)
   10801 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10802 	    || (sc->sc_type == WM_T_I211)) {
   10803 		reg = CSR_READ(sc, WMREG_PHPM);
   10804 		reg &= ~PHPM_GO_LINK_D;
   10805 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10806 	}
   10807 
   10808 	/* Disable D0 LPLU. */
   10809 	wm_lplu_d0_disable(sc);
   10810 
   10811 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10812 	sc->sc_ctrl |= CTRL_SLU;
   10813 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10814 	    || (sc->sc_type > WM_T_82543)) {
   10815 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10816 	} else {
   10817 		sc->sc_ctrl &= ~CTRL_ASDE;
   10818 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10819 		if (ife->ifm_media & IFM_FDX)
   10820 			sc->sc_ctrl |= CTRL_FD;
   10821 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10822 		case IFM_10_T:
   10823 			sc->sc_ctrl |= CTRL_SPEED_10;
   10824 			break;
   10825 		case IFM_100_TX:
   10826 			sc->sc_ctrl |= CTRL_SPEED_100;
   10827 			break;
   10828 		case IFM_1000_T:
   10829 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10830 			break;
   10831 		case IFM_NONE:
   10832 			/* There is no specific setting for IFM_NONE */
   10833 			break;
   10834 		default:
   10835 			panic("wm_gmii_mediachange: bad media 0x%x",
   10836 			    ife->ifm_media);
   10837 		}
   10838 	}
   10839 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10840 	CSR_WRITE_FLUSH(sc);
   10841 
   10842 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10843 		wm_serdes_mediachange(ifp);
   10844 
   10845 	if (sc->sc_type <= WM_T_82543)
   10846 		wm_gmii_reset(sc);
   10847 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10848 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10849 		/* Allow time for the SFP cage to power up the PHY */
   10850 		delay(300 * 1000);
   10851 		wm_gmii_reset(sc);
   10852 	}
   10853 
   10854 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10855 		return 0;
   10856 	return rc;
   10857 }
   10858 
   10859 /*
   10860  * wm_gmii_mediastatus:	[ifmedia interface function]
   10861  *
   10862  *	Get the current interface media status on a 1000BASE-T device.
   10863  */
   10864 static void
   10865 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10866 {
   10867 	struct wm_softc *sc = ifp->if_softc;
   10868 
   10869 	ether_mediastatus(ifp, ifmr);
   10870 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10871 	    | sc->sc_flowflags;
   10872 }
   10873 
   10874 #define	MDI_IO		CTRL_SWDPIN(2)
   10875 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10876 #define	MDI_CLK		CTRL_SWDPIN(3)
   10877 
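          /*
           * wm_i82543_mii_sendbits:
           *
           *	Bit-bang `nbits' bits of `data', MSB first, out the software-
           *	controlled MDIO pin, pulsing MDI_CLK around each bit.
           */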
   10878 static void
   10879 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10880 {
   10881 	uint32_t i, v;
   10882 
   10883 	v = CSR_READ(sc, WMREG_CTRL);
   10884 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10885 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10886 
   10887 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10888 		if (data & i)
   10889 			v |= MDI_IO;
   10890 		else
   10891 			v &= ~MDI_IO;
   10892 		CSR_WRITE(sc, WMREG_CTRL, v);
   10893 		CSR_WRITE_FLUSH(sc);
   10894 		delay(10);
   10895 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10896 		CSR_WRITE_FLUSH(sc);
   10897 		delay(10);
   10898 		CSR_WRITE(sc, WMREG_CTRL, v);
   10899 		CSR_WRITE_FLUSH(sc);
   10900 		delay(10);
   10901 	}
   10902 }
   10903 
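          /*
           * wm_i82543_mii_recvbits:
           *
           *	Clock a 16-bit value, MSB first, in from the software-
           *	controlled MDIO pin. The extra clock cycles before and after
           *	the data are presumably the turnaround/idle bits.
           */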
   10904 static uint16_t
   10905 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10906 {
   10907 	uint32_t v, i;
   10908 	uint16_t data = 0;
   10909 
   10910 	v = CSR_READ(sc, WMREG_CTRL);
   10911 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10912 	v |= CTRL_SWDPIO(3);
   10913 
   10914 	CSR_WRITE(sc, WMREG_CTRL, v);
   10915 	CSR_WRITE_FLUSH(sc);
   10916 	delay(10);
   10917 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10918 	CSR_WRITE_FLUSH(sc);
   10919 	delay(10);
   10920 	CSR_WRITE(sc, WMREG_CTRL, v);
   10921 	CSR_WRITE_FLUSH(sc);
   10922 	delay(10);
   10923 
   10924 	for (i = 0; i < 16; i++) {
   10925 		data <<= 1;
   10926 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10927 		CSR_WRITE_FLUSH(sc);
   10928 		delay(10);
   10929 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10930 			data |= 1;
   10931 		CSR_WRITE(sc, WMREG_CTRL, v);
   10932 		CSR_WRITE_FLUSH(sc);
   10933 		delay(10);
   10934 	}
   10935 
   10936 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10937 	CSR_WRITE_FLUSH(sc);
   10938 	delay(10);
   10939 	CSR_WRITE(sc, WMREG_CTRL, v);
   10940 	CSR_WRITE_FLUSH(sc);
   10941 	delay(10);
   10942 
   10943 	return data;
   10944 }
   10945 
   10946 #undef MDI_IO
   10947 #undef MDI_DIR
   10948 #undef MDI_CLK
   10949 
   10950 /*
   10951  * wm_gmii_i82543_readreg:	[mii interface function]
   10952  *
   10953  *	Read a PHY register on the GMII (i82543 version).
   10954  */
   10955 static int
   10956 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10957 {
   10958 	struct wm_softc *sc = device_private(dev);
   10959 
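          	/*
          	 * This forms an IEEE 802.3 clause 22 read frame: a 32-bit
          	 * preamble of ones, then the 14 command bits (start, read
          	 * opcode, 5-bit PHY address, 5-bit register address); the
          	 * PHY then drives 16 data bits back.
          	 */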
   10960 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10961 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10962 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10963 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10964 
   10965 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10966 		device_xname(dev), phy, reg, *val));
   10967 
   10968 	return 0;
   10969 }
   10970 
   10971 /*
   10972  * wm_gmii_i82543_writereg:	[mii interface function]
   10973  *
   10974  *	Write a PHY register on the GMII (i82543 version).
   10975  */
   10976 static int
   10977 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10978 {
   10979 	struct wm_softc *sc = device_private(dev);
   10980 
   10981 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10982 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10983 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10984 	    (MII_COMMAND_START << 30), 32);
   10985 
   10986 	return 0;
   10987 }
   10988 
   10989 /*
   10990  * wm_gmii_mdic_readreg:	[mii interface function]
   10991  *
   10992  *	Read a PHY register on the GMII.
   10993  */
   10994 static int
   10995 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10996 {
   10997 	struct wm_softc *sc = device_private(dev);
   10998 	uint32_t mdic = 0;
   10999 	int i;
   11000 
   11001 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11002 	    && (reg > MII_ADDRMASK)) {
   11003 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11004 		    __func__, sc->sc_phytype, reg);
   11005 		reg &= MII_ADDRMASK;
   11006 	}
   11007 
   11008 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11009 	    MDIC_REGADD(reg));
   11010 
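          	/* Poll for completion, up to WM_GEN_POLL_TIMEOUT * 3 times. */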
   11011 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11012 		delay(50);
   11013 		mdic = CSR_READ(sc, WMREG_MDIC);
   11014 		if (mdic & MDIC_READY)
   11015 			break;
   11016 	}
   11017 
   11018 	if ((mdic & MDIC_READY) == 0) {
   11019 		DPRINTF(sc, WM_DEBUG_GMII,
   11020 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11021 			device_xname(dev), phy, reg));
   11022 		return ETIMEDOUT;
   11023 	} else if (mdic & MDIC_E) {
   11024 		/* This is normal if no PHY is present. */
   11025 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   11026 			device_xname(sc->sc_dev), phy, reg));
   11027 		return -1;
   11028 	} else
   11029 		*val = MDIC_DATA(mdic);
   11030 
   11031 	/*
   11032 	 * Allow some time after each MDIC transaction to avoid
   11033 	 * reading duplicate data in the next MDIC transaction.
   11034 	 */
   11035 	if (sc->sc_type == WM_T_PCH2)
   11036 		delay(100);
   11037 
   11038 	return 0;
   11039 }
   11040 
   11041 /*
   11042  * wm_gmii_mdic_writereg:	[mii interface function]
   11043  *
   11044  *	Write a PHY register on the GMII.
   11045  */
   11046 static int
   11047 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11048 {
   11049 	struct wm_softc *sc = device_private(dev);
   11050 	uint32_t mdic = 0;
   11051 	int i;
   11052 
   11053 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11054 	    && (reg > MII_ADDRMASK)) {
   11055 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11056 		    __func__, sc->sc_phytype, reg);
   11057 		reg &= MII_ADDRMASK;
   11058 	}
   11059 
   11060 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11061 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11062 
   11063 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11064 		delay(50);
   11065 		mdic = CSR_READ(sc, WMREG_MDIC);
   11066 		if (mdic & MDIC_READY)
   11067 			break;
   11068 	}
   11069 
   11070 	if ((mdic & MDIC_READY) == 0) {
   11071 		DPRINTF(sc, WM_DEBUG_GMII,
   11072 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11073 			device_xname(dev), phy, reg));
   11074 		return ETIMEDOUT;
   11075 	} else if (mdic & MDIC_E) {
   11076 		DPRINTF(sc, WM_DEBUG_GMII,
   11077 		    ("%s: MDIC write error: phy %d reg %d\n",
   11078 			device_xname(dev), phy, reg));
   11079 		return -1;
   11080 	}
   11081 
   11082 	/*
   11083 	 * Allow some time after each MDIC transaction to avoid
   11084 	 * reading duplicate data in the next MDIC transaction.
   11085 	 */
   11086 	if (sc->sc_type == WM_T_PCH2)
   11087 		delay(100);
   11088 
   11089 	return 0;
   11090 }
   11091 
   11092 /*
   11093  * wm_gmii_i82544_readreg:	[mii interface function]
   11094  *
   11095  *	Read a PHY register on the GMII.
   11096  */
   11097 static int
   11098 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11099 {
   11100 	struct wm_softc *sc = device_private(dev);
   11101 	int rv;
   11102 
   11103 	if (sc->phy.acquire(sc)) {
   11104 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11105 		return -1;
   11106 	}
   11107 
   11108 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11109 
   11110 	sc->phy.release(sc);
   11111 
   11112 	return rv;
   11113 }
   11114 
   11115 static int
   11116 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11117 {
   11118 	struct wm_softc *sc = device_private(dev);
   11119 	int rv;
   11120 
   11121 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11122 		switch (sc->sc_phytype) {
   11123 		case WMPHY_IGP:
   11124 		case WMPHY_IGP_2:
   11125 		case WMPHY_IGP_3:
   11126 			rv = wm_gmii_mdic_writereg(dev, phy,
   11127 			    IGPHY_PAGE_SELECT, reg);
   11128 			if (rv != 0)
   11129 				return rv;
   11130 			break;
   11131 		default:
   11132 #ifdef WM_DEBUG
   11133 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11134 			    __func__, sc->sc_phytype, reg);
   11135 #endif
   11136 			break;
   11137 		}
   11138 	}
   11139 
   11140 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11141 }
   11142 
   11143 /*
   11144  * wm_gmii_i82544_writereg:	[mii interface function]
   11145  *
   11146  *	Write a PHY register on the GMII.
   11147  */
   11148 static int
   11149 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11150 {
   11151 	struct wm_softc *sc = device_private(dev);
   11152 	int rv;
   11153 
   11154 	if (sc->phy.acquire(sc)) {
   11155 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11156 		return -1;
   11157 	}
   11158 
   11159 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11160 	sc->phy.release(sc);
   11161 
   11162 	return rv;
   11163 }
   11164 
   11165 static int
   11166 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11167 {
   11168 	struct wm_softc *sc = device_private(dev);
   11169 	int rv;
   11170 
   11171 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11172 		switch (sc->sc_phytype) {
   11173 		case WMPHY_IGP:
   11174 		case WMPHY_IGP_2:
   11175 		case WMPHY_IGP_3:
   11176 			rv = wm_gmii_mdic_writereg(dev, phy,
   11177 			    IGPHY_PAGE_SELECT, reg);
   11178 			if (rv != 0)
   11179 				return rv;
   11180 			break;
   11181 		default:
   11182 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
			    __func__, sc->sc_phytype, reg);
   11185 #endif
   11186 			break;
   11187 		}
   11188 	}
   11189 
   11190 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11191 }
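
/*
 * Note (illustrative): for IGP PHYs, a register argument above
 * MII_ADDRMASK (0x1f) carries the page in its upper bits.  The whole
 * value is written to IGPHY_PAGE_SELECT and the access itself then uses
 * only the low five bits, so a caller would encode something like
 * "(page << 5) | regnum"; the exact shift is an assumption inferred
 * from the masking above.
 */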
   11192 
   11193 /*
   11194  * wm_gmii_i80003_readreg:	[mii interface function]
   11195  *
 *	Read a PHY register on the kumeran.
   11197  * This could be handled by the PHY layer if we didn't have to lock the
   11198  * resource ...
   11199  */
   11200 static int
   11201 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11202 {
   11203 	struct wm_softc *sc = device_private(dev);
   11204 	int page_select;
   11205 	uint16_t temp, temp2;
   11206 	int rv = 0;
   11207 
   11208 	if (phy != 1) /* Only one PHY on kumeran bus */
   11209 		return -1;
   11210 
   11211 	if (sc->phy.acquire(sc)) {
   11212 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11213 		return -1;
   11214 	}
   11215 
   11216 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11217 		page_select = GG82563_PHY_PAGE_SELECT;
   11218 	else {
   11219 		/*
   11220 		 * Use Alternative Page Select register to access registers
   11221 		 * 30 and 31.
   11222 		 */
   11223 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11224 	}
   11225 	temp = reg >> GG82563_PAGE_SHIFT;
   11226 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11227 		goto out;
   11228 
   11229 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11230 		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
   11233 		 */
   11234 		delay(200);
   11235 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11236 		if ((rv != 0) || (temp2 != temp)) {
   11237 			device_printf(dev, "%s failed\n", __func__);
   11238 			rv = -1;
   11239 			goto out;
   11240 		}
   11241 		delay(200);
   11242 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11243 		delay(200);
   11244 	} else
   11245 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11246 
   11247 out:
   11248 	sc->phy.release(sc);
   11249 	return rv;
   11250 }
   11251 
   11252 /*
   11253  * wm_gmii_i80003_writereg:	[mii interface function]
   11254  *
   11255  *	Write a PHY register on the kumeran.
   11256  * This could be handled by the PHY layer if we didn't have to lock the
   11257  * resource ...
   11258  */
   11259 static int
   11260 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11261 {
   11262 	struct wm_softc *sc = device_private(dev);
   11263 	int page_select, rv;
   11264 	uint16_t temp, temp2;
   11265 
   11266 	if (phy != 1) /* Only one PHY on kumeran bus */
   11267 		return -1;
   11268 
   11269 	if (sc->phy.acquire(sc)) {
   11270 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11271 		return -1;
   11272 	}
   11273 
   11274 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11275 		page_select = GG82563_PHY_PAGE_SELECT;
   11276 	else {
   11277 		/*
   11278 		 * Use Alternative Page Select register to access registers
   11279 		 * 30 and 31.
   11280 		 */
   11281 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11282 	}
   11283 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11284 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11285 		goto out;
   11286 
   11287 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11288 		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
   11291 		 */
   11292 		delay(200);
   11293 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11294 		if ((rv != 0) || (temp2 != temp)) {
   11295 			device_printf(dev, "%s failed\n", __func__);
   11296 			rv = -1;
   11297 			goto out;
   11298 		}
   11299 		delay(200);
   11300 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11301 		delay(200);
   11302 	} else
   11303 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11304 
   11305 out:
   11306 	sc->phy.release(sc);
   11307 	return rv;
   11308 }
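
/*
 * Note (illustrative): for the GG82563, callers encode the page above
 * GG82563_PAGE_SHIFT, e.g. "(page << GG82563_PAGE_SHIFT) |
 * (regnum & MII_ADDRMASK)".  Registers 30 and 31 of a page are reached
 * through the alternative page select register, as handled above.
 */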
   11309 
   11310 /*
   11311  * wm_gmii_bm_readreg:	[mii interface function]
   11312  *
 *	Read a PHY register on the BM PHY.
   11314  * This could be handled by the PHY layer if we didn't have to lock the
   11315  * resource ...
   11316  */
   11317 static int
   11318 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11319 {
   11320 	struct wm_softc *sc = device_private(dev);
   11321 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11322 	int rv;
   11323 
   11324 	if (sc->phy.acquire(sc)) {
   11325 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11326 		return -1;
   11327 	}
   11328 
   11329 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11330 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11331 		    || (reg == 31)) ? 1 : phy;
   11332 	/* Page 800 works differently than the rest so it has its own func */
   11333 	if (page == BM_WUC_PAGE) {
   11334 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11335 		goto release;
   11336 	}
   11337 
   11338 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11339 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11340 		    && (sc->sc_type != WM_T_82583))
   11341 			rv = wm_gmii_mdic_writereg(dev, phy,
   11342 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11343 		else
   11344 			rv = wm_gmii_mdic_writereg(dev, phy,
   11345 			    BME1000_PHY_PAGE_SELECT, page);
   11346 		if (rv != 0)
   11347 			goto release;
   11348 	}
   11349 
   11350 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11351 
   11352 release:
   11353 	sc->phy.release(sc);
   11354 	return rv;
   11355 }
   11356 
   11357 /*
   11358  * wm_gmii_bm_writereg:	[mii interface function]
 *	Write a PHY register on the BM PHY.
   11360  *	Write a PHY register on the kumeran.
   11361  * This could be handled by the PHY layer if we didn't have to lock the
   11362  * resource ...
   11363  */
   11364 static int
   11365 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11366 {
   11367 	struct wm_softc *sc = device_private(dev);
   11368 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11369 	int rv;
   11370 
   11371 	if (sc->phy.acquire(sc)) {
   11372 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11373 		return -1;
   11374 	}
   11375 
   11376 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11377 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11378 		    || (reg == 31)) ? 1 : phy;
   11379 	/* Page 800 works differently than the rest so it has its own func */
   11380 	if (page == BM_WUC_PAGE) {
   11381 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11382 		goto release;
   11383 	}
   11384 
   11385 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11386 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11387 		    && (sc->sc_type != WM_T_82583))
   11388 			rv = wm_gmii_mdic_writereg(dev, phy,
   11389 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11390 		else
   11391 			rv = wm_gmii_mdic_writereg(dev, phy,
   11392 			    BME1000_PHY_PAGE_SELECT, page);
   11393 		if (rv != 0)
   11394 			goto release;
   11395 	}
   11396 
   11397 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11398 
   11399 release:
   11400 	sc->phy.release(sc);
   11401 	return rv;
   11402 }
   11403 
   11404 /*
   11405  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11406  *  @dev: pointer to the HW structure
   11407  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11408  *
   11409  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11410  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11411  */
   11412 static int
   11413 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11414 {
   11415 #ifdef WM_DEBUG
   11416 	struct wm_softc *sc = device_private(dev);
   11417 #endif
   11418 	uint16_t temp;
   11419 	int rv;
   11420 
   11421 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11422 		device_xname(dev), __func__));
   11423 
   11424 	if (!phy_regp)
   11425 		return -1;
   11426 
   11427 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11428 
   11429 	/* Select Port Control Registers page */
   11430 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11431 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11432 	if (rv != 0)
   11433 		return rv;
   11434 
   11435 	/* Read WUCE and save it */
   11436 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11437 	if (rv != 0)
   11438 		return rv;
   11439 
   11440 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11441 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11442 	 */
   11443 	temp = *phy_regp;
   11444 	temp |= BM_WUC_ENABLE_BIT;
   11445 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11446 
   11447 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11448 		return rv;
   11449 
   11450 	/* Select Host Wakeup Registers page - caller now able to write
   11451 	 * registers on the Wakeup registers page
   11452 	 */
   11453 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11454 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11455 }
   11456 
   11457 /*
   11458  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11459  *  @dev: pointer to the HW structure
   11460  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11461  *
   11462  *  Restore BM_WUC_ENABLE_REG to its original value.
   11463  *
   11464  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11465  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11466  *  caller.
   11467  */
   11468 static int
   11469 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11470 {
   11471 #ifdef WM_DEBUG
   11472 	struct wm_softc *sc = device_private(dev);
   11473 #endif
   11474 
   11475 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11476 		device_xname(dev), __func__));
   11477 
   11478 	if (!phy_regp)
   11479 		return -1;
   11480 
   11481 	/* Select Port Control Registers page */
   11482 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11483 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11484 
   11485 	/* Restore 769.17 to its original value */
   11486 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11487 
   11488 	return 0;
   11489 }
   11490 
   11491 /*
   11492  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11493  *  @sc: pointer to the HW structure
   11494  *  @offset: register offset to be read or written
   11495  *  @val: pointer to the data to read or write
   11496  *  @rd: determines if operation is read or write
   11497  *  @page_set: BM_WUC_PAGE already set and access enabled
   11498  *
   11499  *  Read the PHY register at offset and store the retrieved information in
   11500  *  data, or write data to PHY register at offset.  Note the procedure to
   11501  *  access the PHY wakeup registers is different than reading the other PHY
   11502  *  registers. It works as such:
   11503  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set the page to 800 for host accesses (801 for manageability)
   11505  *  3) Write the address using the address opcode (0x11)
   11506  *  4) Read or write the data using the data opcode (0x12)
   11507  *  5) Restore 769.17.2 to its original value
   11508  *
   11509  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11510  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11511  *
   11512  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11513  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11514  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11515  */
   11516 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11518 	bool page_set)
   11519 {
   11520 	struct wm_softc *sc = device_private(dev);
   11521 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11522 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11523 	uint16_t wuce;
   11524 	int rv = 0;
   11525 
   11526 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11527 		device_xname(dev), __func__));
   11528 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11529 	if ((sc->sc_type == WM_T_PCH)
   11530 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11531 		device_printf(dev,
   11532 		    "Attempting to access page %d while gig enabled.\n", page);
   11533 	}
   11534 
   11535 	if (!page_set) {
   11536 		/* Enable access to PHY wakeup registers */
   11537 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11538 		if (rv != 0) {
   11539 			device_printf(dev,
   11540 			    "%s: Could not enable PHY wakeup reg access\n",
   11541 			    __func__);
   11542 			return rv;
   11543 		}
   11544 	}
   11545 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
		device_xname(dev), __func__, page, regnum));
   11547 
	/*
	 * 3) and 4) Access the PHY wakeup registers.
	 * See the procedure in the comment above this function.
	 */
   11552 
   11553 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11554 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11555 	if (rv != 0)
   11556 		return rv;
   11557 
   11558 	if (rd) {
   11559 		/* Read the Wakeup register page value using opcode 0x12 */
   11560 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11561 	} else {
   11562 		/* Write the Wakeup register page value using opcode 0x12 */
   11563 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11564 	}
   11565 	if (rv != 0)
   11566 		return rv;
   11567 
   11568 	if (!page_set)
   11569 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11570 
   11571 	return rv;
   11572 }
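
/*
 * Example (illustrative sketch, not part of the driver): a full read of
 * one wakeup-page register.  Real callers pass an offset whose
 * BM_PHY_REG_PAGE() is BM_WUC_PAGE; the wakeup helpers themselves
 * assume the PHY semaphore is already held.
 */
#if 0
static int
wm_example_read_wuc_reg(device_t dev, int offset, uint16_t *valp)
{
	struct wm_softc *sc = device_private(dev);
	int rv;

	if (sc->phy.acquire(sc) != 0)
		return -1;
	/* rd = true, page_set = false: access is enabled/disabled inside. */
	rv = wm_access_phy_wakeup_reg_bm(dev, offset, valp, true, false);
	sc->phy.release(sc);
	return rv;
}
#endif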
   11573 
   11574 /*
   11575  * wm_gmii_hv_readreg:	[mii interface function]
   11576  *
 *	Read a PHY register on the HV PHY.
   11578  * This could be handled by the PHY layer if we didn't have to lock the
   11579  * resource ...
   11580  */
   11581 static int
   11582 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11583 {
   11584 	struct wm_softc *sc = device_private(dev);
   11585 	int rv;
   11586 
   11587 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11588 		device_xname(dev), __func__));
   11589 	if (sc->phy.acquire(sc)) {
   11590 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11591 		return -1;
   11592 	}
   11593 
   11594 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11595 	sc->phy.release(sc);
   11596 	return rv;
   11597 }
   11598 
   11599 static int
   11600 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11601 {
   11602 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11603 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11604 	int rv;
   11605 
   11606 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11607 
   11608 	/* Page 800 works differently than the rest so it has its own func */
   11609 	if (page == BM_WUC_PAGE)
   11610 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11611 
	/*
	 * Pages lower than 768 work differently from the rest and would
	 * need their own function; they are not handled here.
	 */
   11616 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11618 		return -1;
   11619 	}
   11620 
   11621 	/*
   11622 	 * XXX I21[789] documents say that the SMBus Address register is at
   11623 	 * PHY address 01, Page 0 (not 768), Register 26.
   11624 	 */
   11625 	if (page == HV_INTC_FC_PAGE_START)
   11626 		page = 0;
   11627 
   11628 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11629 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11630 		    page << BME1000_PAGE_SHIFT);
   11631 		if (rv != 0)
   11632 			return rv;
   11633 	}
   11634 
   11635 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11636 }
   11637 
   11638 /*
   11639  * wm_gmii_hv_writereg:	[mii interface function]
   11640  *
 *	Write a PHY register on the HV PHY.
   11642  * This could be handled by the PHY layer if we didn't have to lock the
   11643  * resource ...
   11644  */
   11645 static int
   11646 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11647 {
   11648 	struct wm_softc *sc = device_private(dev);
   11649 	int rv;
   11650 
   11651 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11652 		device_xname(dev), __func__));
   11653 
   11654 	if (sc->phy.acquire(sc)) {
   11655 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11656 		return -1;
   11657 	}
   11658 
   11659 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11660 	sc->phy.release(sc);
   11661 
   11662 	return rv;
   11663 }
   11664 
   11665 static int
   11666 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11667 {
   11668 	struct wm_softc *sc = device_private(dev);
   11669 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11670 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11671 	int rv;
   11672 
   11673 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11674 
   11675 	/* Page 800 works differently than the rest so it has its own func */
   11676 	if (page == BM_WUC_PAGE)
   11677 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11678 		    false);
   11679 
	/*
	 * Pages lower than 768 work differently from the rest and would
	 * need their own function; they are not handled here.
	 */
   11684 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11686 		return -1;
   11687 	}
   11688 
	/*
	 * XXX I21[789] documents say that the SMBus Address register
	 * is at PHY address 01, Page 0 (not 768), Register 26.
	 */
	if (page == HV_INTC_FC_PAGE_START)
		page = 0;

	/*
	 * XXX Work around MDIO accesses being disabled after entering
	 * IEEE Power Down (whenever bit 11 of the PHY control register
	 * is set).
	 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev >= 1)
		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
		    && ((val & (1 << 11)) != 0))
			device_printf(dev, "XXX need workaround\n");
	}

	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
		    page << BME1000_PAGE_SHIFT);
		if (rv != 0)
			return rv;
	}
   11720 
   11721 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11722 }
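
/*
 * Note (illustrative): HV register arguments pack the page and the
 * register number together and are unpacked with BM_PHY_REG_PAGE() and
 * BM_PHY_REG_NUM() above.  A packing macro such as BM_PHY_REG(page, reg)
 * (hypothetical name) would be the inverse; pages at or above
 * HV_INTC_FC_PAGE_START are forced to PHY address 1.
 */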
   11723 
   11724 /*
   11725  * wm_gmii_82580_readreg:	[mii interface function]
   11726  *
   11727  *	Read a PHY register on the 82580 and I350.
   11728  * This could be handled by the PHY layer if we didn't have to lock the
   11729  * resource ...
   11730  */
   11731 static int
   11732 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11733 {
   11734 	struct wm_softc *sc = device_private(dev);
   11735 	int rv;
   11736 
   11737 	if (sc->phy.acquire(sc) != 0) {
   11738 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11739 		return -1;
   11740 	}
   11741 
   11742 #ifdef DIAGNOSTIC
   11743 	if (reg > MII_ADDRMASK) {
   11744 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11745 		    __func__, sc->sc_phytype, reg);
   11746 		reg &= MII_ADDRMASK;
   11747 	}
   11748 #endif
   11749 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11750 
   11751 	sc->phy.release(sc);
   11752 	return rv;
   11753 }
   11754 
   11755 /*
   11756  * wm_gmii_82580_writereg:	[mii interface function]
   11757  *
   11758  *	Write a PHY register on the 82580 and I350.
   11759  * This could be handled by the PHY layer if we didn't have to lock the
   11760  * resource ...
   11761  */
   11762 static int
   11763 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11764 {
   11765 	struct wm_softc *sc = device_private(dev);
   11766 	int rv;
   11767 
   11768 	if (sc->phy.acquire(sc) != 0) {
   11769 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11770 		return -1;
   11771 	}
   11772 
   11773 #ifdef DIAGNOSTIC
   11774 	if (reg > MII_ADDRMASK) {
   11775 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11776 		    __func__, sc->sc_phytype, reg);
   11777 		reg &= MII_ADDRMASK;
   11778 	}
   11779 #endif
   11780 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11781 
   11782 	sc->phy.release(sc);
   11783 	return rv;
   11784 }
   11785 
   11786 /*
   11787  * wm_gmii_gs40g_readreg:	[mii interface function]
   11788  *
 *	Read a PHY register on the I210 and I211.
   11790  * This could be handled by the PHY layer if we didn't have to lock the
   11791  * resource ...
   11792  */
   11793 static int
   11794 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11795 {
   11796 	struct wm_softc *sc = device_private(dev);
   11797 	int page, offset;
   11798 	int rv;
   11799 
   11800 	/* Acquire semaphore */
   11801 	if (sc->phy.acquire(sc)) {
   11802 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11803 		return -1;
   11804 	}
   11805 
   11806 	/* Page select */
   11807 	page = reg >> GS40G_PAGE_SHIFT;
   11808 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11809 	if (rv != 0)
   11810 		goto release;
   11811 
   11812 	/* Read reg */
   11813 	offset = reg & GS40G_OFFSET_MASK;
   11814 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11815 
   11816 release:
   11817 	sc->phy.release(sc);
   11818 	return rv;
   11819 }
   11820 
   11821 /*
   11822  * wm_gmii_gs40g_writereg:	[mii interface function]
   11823  *
   11824  *	Write a PHY register on the I210 and I211.
   11825  * This could be handled by the PHY layer if we didn't have to lock the
   11826  * resource ...
   11827  */
   11828 static int
   11829 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11830 {
   11831 	struct wm_softc *sc = device_private(dev);
   11832 	uint16_t page;
   11833 	int offset, rv;
   11834 
   11835 	/* Acquire semaphore */
   11836 	if (sc->phy.acquire(sc)) {
   11837 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11838 		return -1;
   11839 	}
   11840 
   11841 	/* Page select */
   11842 	page = reg >> GS40G_PAGE_SHIFT;
   11843 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11844 	if (rv != 0)
   11845 		goto release;
   11846 
   11847 	/* Write reg */
   11848 	offset = reg & GS40G_OFFSET_MASK;
   11849 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11850 
   11851 release:
   11852 	/* Release semaphore */
   11853 	sc->phy.release(sc);
   11854 	return rv;
   11855 }
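
/*
 * Note (illustrative): GS40G register arguments carry the page above
 * GS40G_PAGE_SHIFT and the register offset within GS40G_OFFSET_MASK,
 * so page 2, register 16 would be encoded as
 * "(2 << GS40G_PAGE_SHIFT) | 16".
 */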
   11856 
   11857 /*
   11858  * wm_gmii_statchg:	[mii interface function]
   11859  *
   11860  *	Callback from MII layer when media changes.
   11861  */
   11862 static void
   11863 wm_gmii_statchg(struct ifnet *ifp)
   11864 {
   11865 	struct wm_softc *sc = ifp->if_softc;
   11866 	struct mii_data *mii = &sc->sc_mii;
   11867 
   11868 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11869 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11870 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11871 
   11872 	/* Get flow control negotiation result. */
   11873 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11874 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11875 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11876 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11877 	}
   11878 
   11879 	if (sc->sc_flowflags & IFM_FLOW) {
   11880 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11881 			sc->sc_ctrl |= CTRL_TFCE;
   11882 			sc->sc_fcrtl |= FCRTL_XONE;
   11883 		}
   11884 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11885 			sc->sc_ctrl |= CTRL_RFCE;
   11886 	}
   11887 
   11888 	if (mii->mii_media_active & IFM_FDX) {
   11889 		DPRINTF(sc, WM_DEBUG_LINK,
   11890 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11891 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11892 	} else {
   11893 		DPRINTF(sc, WM_DEBUG_LINK,
   11894 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11895 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11896 	}
   11897 
   11898 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11899 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11900 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11901 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11902 	if (sc->sc_type == WM_T_80003) {
   11903 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11904 		case IFM_1000_T:
   11905 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11906 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11908 			break;
   11909 		default:
   11910 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11911 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11913 			break;
   11914 		}
   11915 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11916 	}
   11917 }
   11918 
   11919 /* kumeran related (80003, ICH* and PCH*) */
   11920 
   11921 /*
   11922  * wm_kmrn_readreg:
   11923  *
   11924  *	Read a kumeran register
   11925  */
   11926 static int
   11927 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11928 {
   11929 	int rv;
   11930 
   11931 	if (sc->sc_type == WM_T_80003)
   11932 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11933 	else
   11934 		rv = sc->phy.acquire(sc);
   11935 	if (rv != 0) {
   11936 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11937 		    __func__);
   11938 		return rv;
   11939 	}
   11940 
   11941 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11942 
   11943 	if (sc->sc_type == WM_T_80003)
   11944 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11945 	else
   11946 		sc->phy.release(sc);
   11947 
   11948 	return rv;
   11949 }
   11950 
   11951 static int
   11952 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11953 {
   11954 
   11955 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11956 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11957 	    KUMCTRLSTA_REN);
   11958 	CSR_WRITE_FLUSH(sc);
   11959 	delay(2);
   11960 
   11961 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11962 
   11963 	return 0;
   11964 }
   11965 
   11966 /*
   11967  * wm_kmrn_writereg:
   11968  *
   11969  *	Write a kumeran register
   11970  */
   11971 static int
   11972 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11973 {
   11974 	int rv;
   11975 
   11976 	if (sc->sc_type == WM_T_80003)
   11977 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11978 	else
   11979 		rv = sc->phy.acquire(sc);
   11980 	if (rv != 0) {
   11981 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11982 		    __func__);
   11983 		return rv;
   11984 	}
   11985 
   11986 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11987 
   11988 	if (sc->sc_type == WM_T_80003)
   11989 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11990 	else
   11991 		sc->phy.release(sc);
   11992 
   11993 	return rv;
   11994 }
   11995 
   11996 static int
   11997 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11998 {
   11999 
   12000 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12001 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12002 
   12003 	return 0;
   12004 }
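
/*
 * Example (illustrative sketch, not part of the driver): a
 * read-modify-write of a kumeran register with the helpers above, which
 * hide the 80003 vs. ICH/PCH semaphore difference.  The register and
 * value are the ones wm_gmii_statchg() uses.
 */
#if 0
static int
wm_example_kmrn_rmw(struct wm_softc *sc)
{
	uint16_t val;
	int rv;

	if ((rv = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &val)) != 0)
		return rv;
	val = KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
	return wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, val);
}
#endif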
   12005 
   12006 /*
 * EMI register related (82579 and WMPHY_I217, i.e. PCH2 and newer).
   12008  * This access method is different from IEEE MMD.
   12009  */
   12010 static int
   12011 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12012 {
   12013 	struct wm_softc *sc = device_private(dev);
   12014 	int rv;
   12015 
   12016 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12017 	if (rv != 0)
   12018 		return rv;
   12019 
   12020 	if (rd)
   12021 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12022 	else
   12023 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12024 	return rv;
   12025 }
   12026 
   12027 static int
   12028 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12029 {
   12030 
   12031 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12032 }
   12033 
   12034 static int
   12035 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12036 {
   12037 
   12038 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12039 }
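
/*
 * Example (illustrative sketch, not part of the driver): EMI registers
 * are reached indirectly through I82579_EMI_ADDR/I82579_EMI_DATA, so
 * the PHY semaphore must be held across both halves; "emi_reg" stands
 * for any valid EMI register offset.
 */
#if 0
static int
wm_example_read_emi(device_t dev, int emi_reg, uint16_t *valp)
{
	struct wm_softc *sc = device_private(dev);
	int rv;

	if (sc->phy.acquire(sc) != 0)
		return -1;
	rv = wm_read_emi_reg_locked(dev, emi_reg, valp);
	sc->phy.release(sc);
	return rv;
}
#endif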
   12040 
   12041 /* SGMII related */
   12042 
   12043 /*
   12044  * wm_sgmii_uses_mdio
   12045  *
   12046  * Check whether the transaction is to the internal PHY or the external
   12047  * MDIO interface. Return true if it's MDIO.
   12048  */
   12049 static bool
   12050 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12051 {
   12052 	uint32_t reg;
   12053 	bool ismdio = false;
   12054 
   12055 	switch (sc->sc_type) {
   12056 	case WM_T_82575:
   12057 	case WM_T_82576:
   12058 		reg = CSR_READ(sc, WMREG_MDIC);
   12059 		ismdio = ((reg & MDIC_DEST) != 0);
   12060 		break;
   12061 	case WM_T_82580:
   12062 	case WM_T_I350:
   12063 	case WM_T_I354:
   12064 	case WM_T_I210:
   12065 	case WM_T_I211:
   12066 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12067 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12068 		break;
   12069 	default:
   12070 		break;
   12071 	}
   12072 
   12073 	return ismdio;
   12074 }
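
/*
 * Note (illustrative): callers would use this to route PHY accesses:
 * the MDIC-based helpers when it returns true, and the I2CCMD-based
 * wm_sgmii_* functions below otherwise.
 */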
   12075 
   12076 /* Setup internal SGMII PHY for SFP */
   12077 static void
   12078 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12079 {
   12080 	uint16_t id1, id2, phyreg;
   12081 	int i, rv;
   12082 
   12083 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12084 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12085 		return;
   12086 
   12087 	for (i = 0; i < MII_NPHY; i++) {
   12088 		sc->phy.no_errprint = true;
   12089 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12090 		if (rv != 0)
   12091 			continue;
   12092 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12093 		if (rv != 0)
   12094 			continue;
   12095 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12096 			continue;
   12097 		sc->phy.no_errprint = false;
   12098 
   12099 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12100 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12101 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12102 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12103 		break;
   12104 	}
   12105 
   12107 
   12108 /*
   12109  * wm_sgmii_readreg:	[mii interface function]
   12110  *
   12111  *	Read a PHY register on the SGMII
   12112  * This could be handled by the PHY layer if we didn't have to lock the
   12113  * resource ...
   12114  */
   12115 static int
   12116 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12117 {
   12118 	struct wm_softc *sc = device_private(dev);
   12119 	int rv;
   12120 
   12121 	if (sc->phy.acquire(sc)) {
   12122 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12123 		return -1;
   12124 	}
   12125 
   12126 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12127 
   12128 	sc->phy.release(sc);
   12129 	return rv;
   12130 }
   12131 
   12132 static int
   12133 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12134 {
   12135 	struct wm_softc *sc = device_private(dev);
   12136 	uint32_t i2ccmd;
   12137 	int i, rv = 0;
   12138 
   12139 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12140 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12141 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12142 
   12143 	/* Poll the ready bit */
   12144 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12145 		delay(50);
   12146 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12147 		if (i2ccmd & I2CCMD_READY)
   12148 			break;
   12149 	}
   12150 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12151 		device_printf(dev, "I2CCMD Read did not complete\n");
   12152 		rv = ETIMEDOUT;
   12153 	}
   12154 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12155 		if (!sc->phy.no_errprint)
   12156 			device_printf(dev, "I2CCMD Error bit set\n");
   12157 		rv = EIO;
   12158 	}
   12159 
   12160 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12161 
   12162 	return rv;
   12163 }
   12164 
   12165 /*
   12166  * wm_sgmii_writereg:	[mii interface function]
   12167  *
   12168  *	Write a PHY register on the SGMII.
   12169  * This could be handled by the PHY layer if we didn't have to lock the
   12170  * resource ...
   12171  */
   12172 static int
   12173 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12174 {
   12175 	struct wm_softc *sc = device_private(dev);
   12176 	int rv;
   12177 
   12178 	if (sc->phy.acquire(sc) != 0) {
   12179 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12180 		return -1;
   12181 	}
   12182 
   12183 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12184 
   12185 	sc->phy.release(sc);
   12186 
   12187 	return rv;
   12188 }
   12189 
   12190 static int
   12191 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12192 {
   12193 	struct wm_softc *sc = device_private(dev);
   12194 	uint32_t i2ccmd;
   12195 	uint16_t swapdata;
   12196 	int rv = 0;
   12197 	int i;
   12198 
   12199 	/* Swap the data bytes for the I2C interface */
   12200 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12201 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12202 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12203 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12204 
   12205 	/* Poll the ready bit */
   12206 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12207 		delay(50);
   12208 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12209 		if (i2ccmd & I2CCMD_READY)
   12210 			break;
   12211 	}
   12212 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12213 		device_printf(dev, "I2CCMD Write did not complete\n");
   12214 		rv = ETIMEDOUT;
   12215 	}
   12216 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12217 		device_printf(dev, "I2CCMD Error bit set\n");
   12218 		rv = EIO;
   12219 	}
   12220 
   12221 	return rv;
   12222 }
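
/*
 * Note (illustrative): the I2CCMD data field is byte-swapped with
 * respect to the 16-bit PHY register, so a write of 0x1234 goes out as
 * 0x3412 and the read path above swaps it back the same way.
 */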
   12223 
   12224 /* TBI related */
   12225 
   12226 static bool
   12227 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12228 {
   12229 	bool sig;
   12230 
   12231 	sig = ctrl & CTRL_SWDPIN(1);
   12232 
   12233 	/*
   12234 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12235 	 * detect a signal, 1 if they don't.
   12236 	 */
   12237 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12238 		sig = !sig;
   12239 
   12240 	return sig;
   12241 }
   12242 
   12243 /*
   12244  * wm_tbi_mediainit:
   12245  *
   12246  *	Initialize media for use on 1000BASE-X devices.
   12247  */
   12248 static void
   12249 wm_tbi_mediainit(struct wm_softc *sc)
   12250 {
   12251 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12252 	const char *sep = "";
   12253 
   12254 	if (sc->sc_type < WM_T_82543)
   12255 		sc->sc_tipg = TIPG_WM_DFLT;
   12256 	else
   12257 		sc->sc_tipg = TIPG_LG_DFLT;
   12258 
   12259 	sc->sc_tbi_serdes_anegticks = 5;
   12260 
   12261 	/* Initialize our media structures */
   12262 	sc->sc_mii.mii_ifp = ifp;
   12263 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12264 
   12265 	ifp->if_baudrate = IF_Gbps(1);
   12266 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12267 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12268 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12269 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12270 		    sc->sc_core_lock);
   12271 	} else {
   12272 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12273 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12274 	}
   12275 
   12276 	/*
   12277 	 * SWD Pins:
   12278 	 *
   12279 	 *	0 = Link LED (output)
   12280 	 *	1 = Loss Of Signal (input)
   12281 	 */
   12282 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12283 
   12284 	/* XXX Perhaps this is only for TBI */
   12285 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12286 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12287 
   12288 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12289 		sc->sc_ctrl &= ~CTRL_LRST;
   12290 
   12291 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12292 
   12293 #define	ADD(ss, mm, dd)							\
   12294 do {									\
   12295 	aprint_normal("%s%s", sep, ss);					\
   12296 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12297 	sep = ", ";							\
   12298 } while (/*CONSTCOND*/0)
   12299 
   12300 	aprint_normal_dev(sc->sc_dev, "");
   12301 
   12302 	if (sc->sc_type == WM_T_I354) {
   12303 		uint32_t status;
   12304 
   12305 		status = CSR_READ(sc, WMREG_STATUS);
   12306 		if (((status & STATUS_2P5_SKU) != 0)
   12307 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12308 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12309 		} else
   12310 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12311 	} else if (sc->sc_type == WM_T_82545) {
   12312 		/* Only 82545 is LX (XXX except SFP) */
   12313 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12314 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12315 	} else if (sc->sc_sfptype != 0) {
   12316 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12317 		switch (sc->sc_sfptype) {
   12318 		default:
   12319 		case SFF_SFP_ETH_FLAGS_1000SX:
   12320 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12321 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12322 			break;
   12323 		case SFF_SFP_ETH_FLAGS_1000LX:
   12324 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12325 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12326 			break;
   12327 		case SFF_SFP_ETH_FLAGS_1000CX:
   12328 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12329 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12330 			break;
   12331 		case SFF_SFP_ETH_FLAGS_1000T:
   12332 			ADD("1000baseT", IFM_1000_T, 0);
   12333 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12334 			break;
   12335 		case SFF_SFP_ETH_FLAGS_100FX:
   12336 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12337 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12338 			break;
   12339 		}
   12340 	} else {
   12341 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12342 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12343 	}
   12344 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12345 	aprint_normal("\n");
   12346 
   12347 #undef ADD
   12348 
   12349 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12350 }
   12351 
   12352 /*
   12353  * wm_tbi_mediachange:	[ifmedia interface function]
   12354  *
   12355  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12356  */
   12357 static int
   12358 wm_tbi_mediachange(struct ifnet *ifp)
   12359 {
   12360 	struct wm_softc *sc = ifp->if_softc;
   12361 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12362 	uint32_t status, ctrl;
   12363 	bool signal;
   12364 	int i;
   12365 
   12366 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12367 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12368 		/* XXX need some work for >= 82571 and < 82575 */
   12369 		if (sc->sc_type < WM_T_82575)
   12370 			return 0;
   12371 	}
   12372 
   12373 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12374 	    || (sc->sc_type >= WM_T_82575))
   12375 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12376 
   12377 	sc->sc_ctrl &= ~CTRL_LRST;
   12378 	sc->sc_txcw = TXCW_ANE;
   12379 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12380 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12381 	else if (ife->ifm_media & IFM_FDX)
   12382 		sc->sc_txcw |= TXCW_FD;
   12383 	else
   12384 		sc->sc_txcw |= TXCW_HD;
   12385 
   12386 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12387 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12388 
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12390 		device_xname(sc->sc_dev), sc->sc_txcw));
   12391 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12392 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12393 	CSR_WRITE_FLUSH(sc);
   12394 	delay(1000);
   12395 
   12396 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12397 	signal = wm_tbi_havesignal(sc, ctrl);
   12398 
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n",
		device_xname(sc->sc_dev), signal));
   12401 
   12402 	if (signal) {
   12403 		/* Have signal; wait for the link to come up. */
   12404 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12405 			delay(10000);
   12406 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12407 				break;
   12408 		}
   12409 
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   12411 			device_xname(sc->sc_dev), i));
   12412 
   12413 		status = CSR_READ(sc, WMREG_STATUS);
   12414 		DPRINTF(sc, WM_DEBUG_LINK,
   12415 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12416 			device_xname(sc->sc_dev), status, STATUS_LU));
   12417 		if (status & STATUS_LU) {
   12418 			/* Link is up. */
   12419 			DPRINTF(sc, WM_DEBUG_LINK,
   12420 			    ("%s: LINK: set media -> link up %s\n",
   12421 				device_xname(sc->sc_dev),
   12422 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12423 
   12424 			/*
   12425 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12426 			 * so we should update sc->sc_ctrl
   12427 			 */
   12428 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12429 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12430 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12431 			if (status & STATUS_FD)
   12432 				sc->sc_tctl |=
   12433 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12434 			else
   12435 				sc->sc_tctl |=
   12436 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12437 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12438 				sc->sc_fcrtl |= FCRTL_XONE;
   12439 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12440 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12441 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12442 			sc->sc_tbi_linkup = 1;
   12443 		} else {
   12444 			if (i == WM_LINKUP_TIMEOUT)
   12445 				wm_check_for_link(sc);
   12446 			/* Link is down. */
   12447 			DPRINTF(sc, WM_DEBUG_LINK,
   12448 			    ("%s: LINK: set media -> link down\n",
   12449 				device_xname(sc->sc_dev)));
   12450 			sc->sc_tbi_linkup = 0;
   12451 		}
   12452 	} else {
   12453 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12454 			device_xname(sc->sc_dev)));
   12455 		sc->sc_tbi_linkup = 0;
   12456 	}
   12457 
   12458 	wm_tbi_serdes_set_linkled(sc);
   12459 
   12460 	return 0;
   12461 }
   12462 
   12463 /*
   12464  * wm_tbi_mediastatus:	[ifmedia interface function]
   12465  *
   12466  *	Get the current interface media status on a 1000BASE-X device.
   12467  */
   12468 static void
   12469 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12470 {
   12471 	struct wm_softc *sc = ifp->if_softc;
   12472 	uint32_t ctrl, status;
   12473 
   12474 	ifmr->ifm_status = IFM_AVALID;
   12475 	ifmr->ifm_active = IFM_ETHER;
   12476 
   12477 	status = CSR_READ(sc, WMREG_STATUS);
   12478 	if ((status & STATUS_LU) == 0) {
   12479 		ifmr->ifm_active |= IFM_NONE;
   12480 		return;
   12481 	}
   12482 
   12483 	ifmr->ifm_status |= IFM_ACTIVE;
   12484 	/* Only 82545 is LX */
   12485 	if (sc->sc_type == WM_T_82545)
   12486 		ifmr->ifm_active |= IFM_1000_LX;
   12487 	else
   12488 		ifmr->ifm_active |= IFM_1000_SX;
   12489 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12490 		ifmr->ifm_active |= IFM_FDX;
   12491 	else
   12492 		ifmr->ifm_active |= IFM_HDX;
   12493 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12494 	if (ctrl & CTRL_RFCE)
   12495 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12496 	if (ctrl & CTRL_TFCE)
   12497 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12498 }
   12499 
   12500 /* XXX TBI only */
   12501 static int
   12502 wm_check_for_link(struct wm_softc *sc)
   12503 {
   12504 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12505 	uint32_t rxcw;
   12506 	uint32_t ctrl;
   12507 	uint32_t status;
   12508 	bool signal;
   12509 
   12510 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12511 		device_xname(sc->sc_dev), __func__));
   12512 
   12513 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12514 		/* XXX need some work for >= 82571 */
   12515 		if (sc->sc_type >= WM_T_82571) {
   12516 			sc->sc_tbi_linkup = 1;
   12517 			return 0;
   12518 		}
   12519 	}
   12520 
   12521 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12522 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12523 	status = CSR_READ(sc, WMREG_STATUS);
   12524 	signal = wm_tbi_havesignal(sc, ctrl);
   12525 
   12526 	DPRINTF(sc, WM_DEBUG_LINK,
   12527 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12528 		device_xname(sc->sc_dev), __func__, signal,
   12529 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12530 
   12531 	/*
   12532 	 * SWDPIN   LU RXCW
   12533 	 *	0    0	  0
   12534 	 *	0    0	  1	(should not happen)
   12535 	 *	0    1	  0	(should not happen)
   12536 	 *	0    1	  1	(should not happen)
   12537 	 *	1    0	  0	Disable autonego and force linkup
   12538 	 *	1    0	  1	got /C/ but not linkup yet
   12539 	 *	1    1	  0	(linkup)
   12540 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12541 	 *
   12542 	 */
   12543 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12544 		DPRINTF(sc, WM_DEBUG_LINK,
   12545 		    ("%s: %s: force linkup and fullduplex\n",
   12546 			device_xname(sc->sc_dev), __func__));
   12547 		sc->sc_tbi_linkup = 0;
   12548 		/* Disable auto-negotiation in the TXCW register */
   12549 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12550 
   12551 		/*
   12552 		 * Force link-up and also force full-duplex.
   12553 		 *
		 * NOTE: The hardware updates TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl as well.
   12556 		 */
   12557 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12558 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12559 	} else if (((status & STATUS_LU) != 0)
   12560 	    && ((rxcw & RXCW_C) != 0)
   12561 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12562 		sc->sc_tbi_linkup = 1;
   12563 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12564 			device_xname(sc->sc_dev),
   12565 			__func__));
   12566 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12567 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12568 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12569 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   12570 			device_xname(sc->sc_dev), __func__));
   12571 	} else {
   12572 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12573 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12574 			status));
   12575 	}
   12576 
   12577 	return 0;
   12578 }
   12579 
   12580 /*
   12581  * wm_tbi_tick:
   12582  *
   12583  *	Check the link on TBI devices.
   12584  *	This function acts as mii_tick().
   12585  */
   12586 static void
   12587 wm_tbi_tick(struct wm_softc *sc)
   12588 {
   12589 	struct mii_data *mii = &sc->sc_mii;
   12590 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12591 	uint32_t status;
   12592 
   12593 	KASSERT(WM_CORE_LOCKED(sc));
   12594 
   12595 	status = CSR_READ(sc, WMREG_STATUS);
   12596 
   12597 	/* XXX is this needed? */
   12598 	(void)CSR_READ(sc, WMREG_RXCW);
   12599 	(void)CSR_READ(sc, WMREG_CTRL);
   12600 
   12601 	/* set link status */
   12602 	if ((status & STATUS_LU) == 0) {
   12603 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12604 			device_xname(sc->sc_dev)));
   12605 		sc->sc_tbi_linkup = 0;
   12606 	} else if (sc->sc_tbi_linkup == 0) {
   12607 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12608 			device_xname(sc->sc_dev),
   12609 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12610 		sc->sc_tbi_linkup = 1;
   12611 		sc->sc_tbi_serdes_ticks = 0;
   12612 	}
   12613 
   12614 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12615 		goto setled;
   12616 
   12617 	if ((status & STATUS_LU) == 0) {
   12618 		sc->sc_tbi_linkup = 0;
   12619 		/* If the timer expired, retry autonegotiation */
   12620 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12621 		    && (++sc->sc_tbi_serdes_ticks
   12622 			>= sc->sc_tbi_serdes_anegticks)) {
   12623 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12624 				device_xname(sc->sc_dev), __func__));
   12625 			sc->sc_tbi_serdes_ticks = 0;
   12626 			/*
   12627 			 * Reset the link, and let autonegotiation do
   12628 			 * its thing
   12629 			 */
   12630 			sc->sc_ctrl |= CTRL_LRST;
   12631 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12632 			CSR_WRITE_FLUSH(sc);
   12633 			delay(1000);
   12634 			sc->sc_ctrl &= ~CTRL_LRST;
   12635 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12636 			CSR_WRITE_FLUSH(sc);
   12637 			delay(1000);
   12638 			CSR_WRITE(sc, WMREG_TXCW,
   12639 			    sc->sc_txcw & ~TXCW_ANE);
   12640 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12641 		}
   12642 	}
   12643 
   12644 setled:
   12645 	wm_tbi_serdes_set_linkled(sc);
   12646 }
   12647 
   12648 /* SERDES related */
   12649 static void
   12650 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12651 {
   12652 	uint32_t reg;
   12653 
   12654 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12655 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12656 		return;
   12657 
   12658 	/* Enable PCS to turn on link */
   12659 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12660 	reg |= PCS_CFG_PCS_EN;
   12661 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12662 
   12663 	/* Power up the laser */
   12664 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12665 	reg &= ~CTRL_EXT_SWDPIN(3);
   12666 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12667 
   12668 	/* Flush the write to verify completion */
   12669 	CSR_WRITE_FLUSH(sc);
   12670 	delay(1000);
   12671 }
   12672 
   12673 static int
   12674 wm_serdes_mediachange(struct ifnet *ifp)
   12675 {
   12676 	struct wm_softc *sc = ifp->if_softc;
   12677 	bool pcs_autoneg = true; /* XXX */
   12678 	uint32_t ctrl_ext, pcs_lctl, reg;
   12679 
   12680 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12681 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12682 		return 0;
   12683 
   12684 	/* XXX Currently, this function is not called on 8257[12] */
   12685 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12686 	    || (sc->sc_type >= WM_T_82575))
   12687 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12688 
   12689 	/* Power on the sfp cage if present */
   12690 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12691 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12692 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12693 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12694 
   12695 	sc->sc_ctrl |= CTRL_SLU;
   12696 
   12697 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12698 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12699 
   12700 		reg = CSR_READ(sc, WMREG_CONNSW);
   12701 		reg |= CONNSW_ENRGSRC;
   12702 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12703 	}
   12704 
   12705 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12706 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12707 	case CTRL_EXT_LINK_MODE_SGMII:
   12708 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12709 		pcs_autoneg = true;
   12710 		/* Autoneg time out should be disabled for SGMII mode */
   12711 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12712 		break;
   12713 	case CTRL_EXT_LINK_MODE_1000KX:
   12714 		pcs_autoneg = false;
   12715 		/* FALLTHROUGH */
   12716 	default:
   12717 		if ((sc->sc_type == WM_T_82575)
   12718 		    || (sc->sc_type == WM_T_82576)) {
   12719 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12720 				pcs_autoneg = false;
   12721 		}
   12722 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12723 		    | CTRL_FRCFDX;
   12724 
   12725 		/* Set speed of 1000/Full if speed/duplex is forced */
   12726 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12727 	}
   12728 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12729 
   12730 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12731 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12732 
   12733 	if (pcs_autoneg) {
   12734 		/* Set PCS register for autoneg */
   12735 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12736 
   12737 		/* Disable force flow control for autoneg */
   12738 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12739 
   12740 		/* Configure flow control advertisement for autoneg */
   12741 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12742 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12743 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12744 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12745 	} else
   12746 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12747 
   12748 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12749 
   12750 	return 0;
   12751 }
   12752 
   12753 static void
   12754 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12755 {
   12756 	struct wm_softc *sc = ifp->if_softc;
   12757 	struct mii_data *mii = &sc->sc_mii;
   12758 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12759 	uint32_t pcs_adv, pcs_lpab, reg;
   12760 
   12761 	ifmr->ifm_status = IFM_AVALID;
   12762 	ifmr->ifm_active = IFM_ETHER;
   12763 
   12764 	/* Check PCS */
   12765 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12766 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12767 		ifmr->ifm_active |= IFM_NONE;
   12768 		sc->sc_tbi_linkup = 0;
   12769 		goto setled;
   12770 	}
   12771 
   12772 	sc->sc_tbi_linkup = 1;
   12773 	ifmr->ifm_status |= IFM_ACTIVE;
   12774 	if (sc->sc_type == WM_T_I354) {
   12775 		uint32_t status;
   12776 
   12777 		status = CSR_READ(sc, WMREG_STATUS);
   12778 		if (((status & STATUS_2P5_SKU) != 0)
   12779 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12780 			ifmr->ifm_active |= IFM_2500_KX;
   12781 		} else
   12782 			ifmr->ifm_active |= IFM_1000_KX;
   12783 	} else {
   12784 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12785 		case PCS_LSTS_SPEED_10:
   12786 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12787 			break;
   12788 		case PCS_LSTS_SPEED_100:
   12789 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12790 			break;
   12791 		case PCS_LSTS_SPEED_1000:
   12792 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12793 			break;
   12794 		default:
   12795 			device_printf(sc->sc_dev, "Unknown speed\n");
   12796 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12797 			break;
   12798 		}
   12799 	}
   12800 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12801 	if ((reg & PCS_LSTS_FDX) != 0)
   12802 		ifmr->ifm_active |= IFM_FDX;
   12803 	else
   12804 		ifmr->ifm_active |= IFM_HDX;
   12805 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12806 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12807 		/* Check flow */
   12808 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12809 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12810 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12811 			goto setled;
   12812 		}
   12813 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12814 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12815 		DPRINTF(sc, WM_DEBUG_LINK,
   12816 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12817 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12818 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12819 			mii->mii_media_active |= IFM_FLOW
   12820 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12821 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12822 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12823 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12824 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12825 			mii->mii_media_active |= IFM_FLOW
   12826 			    | IFM_ETH_TXPAUSE;
   12827 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12828 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12829 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12830 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12831 			mii->mii_media_active |= IFM_FLOW
   12832 			    | IFM_ETH_RXPAUSE;
   12833 		}
   12834 	}
   12835 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12836 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12837 setled:
   12838 	wm_tbi_serdes_set_linkled(sc);
   12839 }
   12840 
   12841 /*
   12842  * wm_serdes_tick:
   12843  *
   12844  *	Check the link on serdes devices.
   12845  */
   12846 static void
   12847 wm_serdes_tick(struct wm_softc *sc)
   12848 {
   12849 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12850 	struct mii_data *mii = &sc->sc_mii;
   12851 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12852 	uint32_t reg;
   12853 
   12854 	KASSERT(WM_CORE_LOCKED(sc));
   12855 
   12856 	mii->mii_media_status = IFM_AVALID;
   12857 	mii->mii_media_active = IFM_ETHER;
   12858 
   12859 	/* Check PCS */
   12860 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12861 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12862 		mii->mii_media_status |= IFM_ACTIVE;
   12863 		sc->sc_tbi_linkup = 1;
   12864 		sc->sc_tbi_serdes_ticks = 0;
   12865 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12866 		if ((reg & PCS_LSTS_FDX) != 0)
   12867 			mii->mii_media_active |= IFM_FDX;
   12868 		else
   12869 			mii->mii_media_active |= IFM_HDX;
   12870 	} else {
   12871 		mii->mii_media_status |= IFM_NONE;
   12872 		sc->sc_tbi_linkup = 0;
   12873 		/* If the timer expired, retry autonegotiation */
   12874 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12875 		    && (++sc->sc_tbi_serdes_ticks
   12876 			>= sc->sc_tbi_serdes_anegticks)) {
   12877 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12878 				device_xname(sc->sc_dev), __func__));
   12879 			sc->sc_tbi_serdes_ticks = 0;
   12880 			/* XXX */
   12881 			wm_serdes_mediachange(ifp);
   12882 		}
   12883 	}
   12884 
   12885 	wm_tbi_serdes_set_linkled(sc);
   12886 }
   12887 
   12888 /* SFP related */
   12889 
   12890 static int
   12891 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12892 {
   12893 	uint32_t i2ccmd;
   12894 	int i;
   12895 
   12896 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12897 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12898 
   12899 	/* Poll the ready bit */
   12900 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12901 		delay(50);
   12902 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12903 		if (i2ccmd & I2CCMD_READY)
   12904 			break;
   12905 	}
   12906 	if ((i2ccmd & I2CCMD_READY) == 0)
   12907 		return -1;
   12908 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12909 		return -1;
   12910 
   12911 	*data = i2ccmd & 0x00ff;
   12912 
   12913 	return 0;
   12914 }
   12915 
   12916 static uint32_t
   12917 wm_sfp_get_media_type(struct wm_softc *sc)
   12918 {
   12919 	uint32_t ctrl_ext;
   12920 	uint8_t val = 0;
   12921 	int timeout = 3;
   12922 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12923 	int rv = -1;
   12924 
   12925 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12926 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12927 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12928 	CSR_WRITE_FLUSH(sc);
   12929 
   12930 	/* Read SFP module data */
   12931 	while (timeout) {
   12932 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12933 		if (rv == 0)
   12934 			break;
   12935 		delay(100*1000); /* XXX too big */
   12936 		timeout--;
   12937 	}
   12938 	if (rv != 0)
   12939 		goto out;
   12940 
   12941 	switch (val) {
   12942 	case SFF_SFP_ID_SFF:
   12943 		aprint_normal_dev(sc->sc_dev,
   12944 		    "Module/Connector soldered to board\n");
   12945 		break;
   12946 	case SFF_SFP_ID_SFP:
   12947 		sc->sc_flags |= WM_F_SFP;
   12948 		break;
   12949 	case SFF_SFP_ID_UNKNOWN:
   12950 		goto out;
   12951 	default:
   12952 		break;
   12953 	}
   12954 
   12955 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12956 	if (rv != 0)
   12957 		goto out;
   12958 
   12959 	sc->sc_sfptype = val;
   12960 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12961 		mediatype = WM_MEDIATYPE_SERDES;
   12962 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12963 		sc->sc_flags |= WM_F_SGMII;
   12964 		mediatype = WM_MEDIATYPE_COPPER;
   12965 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12966 		sc->sc_flags |= WM_F_SGMII;
   12967 		mediatype = WM_MEDIATYPE_SERDES;
   12968 	} else {
   12969 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12970 		    __func__, sc->sc_sfptype);
   12971 		sc->sc_sfptype = 0; /* XXX unknown */
   12972 	}
   12973 
   12974 out:
   12975 	/* Restore I2C interface setting */
   12976 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12977 
   12978 	return mediatype;
   12979 }
   12980 
   12981 /*
   12982  * NVM related.
   12983  * Microwire, SPI (w/wo EERD) and Flash.
   12984  */
   12985 
   12986 /* Both spi and uwire */
   12987 
   12988 /*
   12989  * wm_eeprom_sendbits:
   12990  *
   12991  *	Send a series of bits to the EEPROM.
   12992  */
   12993 static void
   12994 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12995 {
   12996 	uint32_t reg;
   12997 	int x;
   12998 
   12999 	reg = CSR_READ(sc, WMREG_EECD);
   13000 
   13001 	for (x = nbits; x > 0; x--) {
   13002 		if (bits & (1U << (x - 1)))
   13003 			reg |= EECD_DI;
   13004 		else
   13005 			reg &= ~EECD_DI;
   13006 		CSR_WRITE(sc, WMREG_EECD, reg);
   13007 		CSR_WRITE_FLUSH(sc);
   13008 		delay(2);
   13009 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13010 		CSR_WRITE_FLUSH(sc);
   13011 		delay(2);
   13012 		CSR_WRITE(sc, WMREG_EECD, reg);
   13013 		CSR_WRITE_FLUSH(sc);
   13014 		delay(2);
   13015 	}
   13016 }
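
          /*
           * A minimal sketch of the waveform the loop above produces for
           * one bit (DI = EECD_DI data line, SK = EECD_SK serial clock;
           * each step is bracketed by delay(2)):
           *
           *	DI   ____/=========\____	(held stable across the pulse)
           *	SK   _______/===\_______
           *
           * The data bit is presented on DI first, then SK is pulsed high
           * and low, so the EEPROM samples DI on SK's rising edge.  The
           * receive path (wm_eeprom_recvbits() below) is the mirror image:
           * raise SK, sample EECD_DO, lower SK.
           */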
   13017 
   13018 /*
   13019  * wm_eeprom_recvbits:
   13020  *
   13021  *	Receive a series of bits from the EEPROM.
   13022  */
   13023 static void
   13024 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13025 {
   13026 	uint32_t reg, val;
   13027 	int x;
   13028 
   13029 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13030 
   13031 	val = 0;
   13032 	for (x = nbits; x > 0; x--) {
   13033 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13034 		CSR_WRITE_FLUSH(sc);
   13035 		delay(2);
   13036 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13037 			val |= (1U << (x - 1));
   13038 		CSR_WRITE(sc, WMREG_EECD, reg);
   13039 		CSR_WRITE_FLUSH(sc);
   13040 		delay(2);
   13041 	}
   13042 	*valp = val;
   13043 }
   13044 
   13045 /* Microwire */
   13046 
   13047 /*
   13048  * wm_nvm_read_uwire:
   13049  *
    13050  *	Read word(s) from the EEPROM using the MicroWire protocol.
   13051  */
   13052 static int
   13053 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13054 {
   13055 	uint32_t reg, val;
   13056 	int i;
   13057 
   13058 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13059 		device_xname(sc->sc_dev), __func__));
   13060 
   13061 	if (sc->nvm.acquire(sc) != 0)
   13062 		return -1;
   13063 
   13064 	for (i = 0; i < wordcnt; i++) {
   13065 		/* Clear SK and DI. */
   13066 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13067 		CSR_WRITE(sc, WMREG_EECD, reg);
   13068 
   13069 		/*
    13070 		 * XXX: workaround for a bug in qemu-0.12.x and prior,
    13071 		 * and in Xen.
    13072 		 *
    13073 		 * We use this workaround only for the 82540 because qemu's
    13074 		 * e1000 acts as an 82540.
   13075 		 */
   13076 		if (sc->sc_type == WM_T_82540) {
   13077 			reg |= EECD_SK;
   13078 			CSR_WRITE(sc, WMREG_EECD, reg);
   13079 			reg &= ~EECD_SK;
   13080 			CSR_WRITE(sc, WMREG_EECD, reg);
   13081 			CSR_WRITE_FLUSH(sc);
   13082 			delay(2);
   13083 		}
   13084 		/* XXX: end of workaround */
   13085 
   13086 		/* Set CHIP SELECT. */
   13087 		reg |= EECD_CS;
   13088 		CSR_WRITE(sc, WMREG_EECD, reg);
   13089 		CSR_WRITE_FLUSH(sc);
   13090 		delay(2);
   13091 
   13092 		/* Shift in the READ command. */
   13093 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13094 
   13095 		/* Shift in address. */
   13096 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13097 
   13098 		/* Shift out the data. */
   13099 		wm_eeprom_recvbits(sc, &val, 16);
   13100 		data[i] = val & 0xffff;
   13101 
   13102 		/* Clear CHIP SELECT. */
   13103 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13104 		CSR_WRITE(sc, WMREG_EECD, reg);
   13105 		CSR_WRITE_FLUSH(sc);
   13106 		delay(2);
   13107 	}
   13108 
   13109 	sc->nvm.release(sc);
   13110 	return 0;
   13111 }
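
          /*
           * Worked example (assuming a 64-word part, i.e. 6 address bits):
           * reading word 0x05 raises CS, clocks out the 3-bit READ opcode
           * (UWIRE_OPC_READ) and the address 000101b, then clocks in 16
           * data bits MSB first.  A hypothetical caller:
           *
           *	uint16_t word;
           *
           *	if (wm_nvm_read_uwire(sc, 0x05, 1, &word) == 0)
           *		... word now holds NVM word 5 ...
           */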
   13112 
   13113 /* SPI */
   13114 
   13115 /*
   13116  * Set SPI and FLASH related information from the EECD register.
   13117  * For 82541 and 82547, the word size is taken from EEPROM.
   13118  */
   13119 static int
   13120 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13121 {
   13122 	int size;
   13123 	uint32_t reg;
   13124 	uint16_t data;
   13125 
   13126 	reg = CSR_READ(sc, WMREG_EECD);
   13127 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13128 
   13129 	/* Read the size of NVM from EECD by default */
   13130 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13131 	switch (sc->sc_type) {
   13132 	case WM_T_82541:
   13133 	case WM_T_82541_2:
   13134 	case WM_T_82547:
   13135 	case WM_T_82547_2:
   13136 		/* Set dummy value to access EEPROM */
   13137 		sc->sc_nvm_wordsize = 64;
   13138 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13139 			aprint_error_dev(sc->sc_dev,
   13140 			    "%s: failed to read EEPROM size\n", __func__);
   13141 		}
   13142 		reg = data;
   13143 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13144 		if (size == 0)
    13145 			size = 6; /* 64 words (1 << 6) */
   13146 		else
   13147 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13148 		break;
   13149 	case WM_T_80003:
   13150 	case WM_T_82571:
   13151 	case WM_T_82572:
   13152 	case WM_T_82573: /* SPI case */
   13153 	case WM_T_82574: /* SPI case */
   13154 	case WM_T_82583: /* SPI case */
   13155 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13156 		if (size > 14)
   13157 			size = 14;
   13158 		break;
   13159 	case WM_T_82575:
   13160 	case WM_T_82576:
   13161 	case WM_T_82580:
   13162 	case WM_T_I350:
   13163 	case WM_T_I354:
   13164 	case WM_T_I210:
   13165 	case WM_T_I211:
   13166 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13167 		if (size > 15)
   13168 			size = 15;
   13169 		break;
   13170 	default:
   13171 		aprint_error_dev(sc->sc_dev,
    13172 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
    13173 		return -1;
   13175 	}
   13176 
   13177 	sc->sc_nvm_wordsize = 1 << size;
   13178 
   13179 	return 0;
   13180 }
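
          /*
           * Worked example of the computation above, assuming the usual
           * NVM_WORD_SIZE_BASE_SHIFT of 6: an EECD size field of 2 yields
           * 1 << (2 + 6) = 256 words, and the 82575+ clamp caps the result
           * at 1 << 15 = 32768 words.  The exact shift value comes from the
           * register header, so treat the "6" as illustrative.
           */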
   13181 
   13182 /*
   13183  * wm_nvm_ready_spi:
   13184  *
   13185  *	Wait for a SPI EEPROM to be ready for commands.
   13186  */
   13187 static int
   13188 wm_nvm_ready_spi(struct wm_softc *sc)
   13189 {
   13190 	uint32_t val;
   13191 	int usec;
   13192 
   13193 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13194 		device_xname(sc->sc_dev), __func__));
   13195 
   13196 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13197 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13198 		wm_eeprom_recvbits(sc, &val, 8);
   13199 		if ((val & SPI_SR_RDY) == 0)
   13200 			break;
   13201 	}
   13202 	if (usec >= SPI_MAX_RETRIES) {
    13203 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   13204 		return -1;
   13205 	}
   13206 	return 0;
   13207 }
   13208 
   13209 /*
   13210  * wm_nvm_read_spi:
   13211  *
    13212  *	Read word(s) from the EEPROM using the SPI protocol.
   13213  */
   13214 static int
   13215 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13216 {
   13217 	uint32_t reg, val;
   13218 	int i;
   13219 	uint8_t opc;
   13220 	int rv = 0;
   13221 
   13222 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13223 		device_xname(sc->sc_dev), __func__));
   13224 
   13225 	if (sc->nvm.acquire(sc) != 0)
   13226 		return -1;
   13227 
   13228 	/* Clear SK and CS. */
   13229 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13230 	CSR_WRITE(sc, WMREG_EECD, reg);
   13231 	CSR_WRITE_FLUSH(sc);
   13232 	delay(2);
   13233 
   13234 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13235 		goto out;
   13236 
   13237 	/* Toggle CS to flush commands. */
   13238 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13239 	CSR_WRITE_FLUSH(sc);
   13240 	delay(2);
   13241 	CSR_WRITE(sc, WMREG_EECD, reg);
   13242 	CSR_WRITE_FLUSH(sc);
   13243 	delay(2);
   13244 
   13245 	opc = SPI_OPC_READ;
   13246 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13247 		opc |= SPI_OPC_A8;
   13248 
   13249 	wm_eeprom_sendbits(sc, opc, 8);
   13250 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13251 
   13252 	for (i = 0; i < wordcnt; i++) {
   13253 		wm_eeprom_recvbits(sc, &val, 16);
   13254 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13255 	}
   13256 
   13257 	/* Raise CS and clear SK. */
   13258 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13259 	CSR_WRITE(sc, WMREG_EECD, reg);
   13260 	CSR_WRITE_FLUSH(sc);
   13261 	delay(2);
   13262 
   13263 out:
   13264 	sc->nvm.release(sc);
   13265 	return rv;
   13266 }
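
          /*
           * Example of the SPI_OPC_A8 handling above (hypothetical offset):
           * with 8-bit addressing, word 0x90 becomes byte address 0x120
           * after the "word << 1" shift, which does not fit in 8 bits, so
           * SPI_OPC_A8 supplies the ninth address bit inside the opcode,
           * per the usual convention of small SPI EEPROMs.
           */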
   13267 
    13268 /* Reading via EERD */
   13269 
   13270 static int
   13271 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13272 {
   13273 	uint32_t attempts = 100000;
   13274 	uint32_t i, reg = 0;
   13275 	int32_t done = -1;
   13276 
   13277 	for (i = 0; i < attempts; i++) {
   13278 		reg = CSR_READ(sc, rw);
   13279 
   13280 		if (reg & EERD_DONE) {
   13281 			done = 0;
   13282 			break;
   13283 		}
   13284 		delay(5);
   13285 	}
   13286 
   13287 	return done;
   13288 }
   13289 
   13290 static int
   13291 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13292 {
   13293 	int i, eerd = 0;
   13294 	int rv = 0;
   13295 
   13296 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13297 		device_xname(sc->sc_dev), __func__));
   13298 
   13299 	if (sc->nvm.acquire(sc) != 0)
   13300 		return -1;
   13301 
   13302 	for (i = 0; i < wordcnt; i++) {
   13303 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13304 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13305 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13306 		if (rv != 0) {
   13307 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13308 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13309 			break;
   13310 		}
   13311 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13312 	}
   13313 
   13314 	sc->nvm.release(sc);
   13315 	return rv;
   13316 }
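
          /*
           * The EERD handshake above in miniature (a sketch of the same
           * steps, not an additional code path):
           *
           *	CSR_WRITE(sc, WMREG_EERD,
           *	    (addr << EERD_ADDR_SHIFT) | EERD_START);
           *	... poll WMREG_EERD until EERD_DONE is set ...
           *	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
           *
           * The MAC runs the EEPROM cycle itself, so no EECD bit-banging
           * is required.
           */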
   13317 
   13318 /* Flash */
   13319 
   13320 static int
   13321 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13322 {
   13323 	uint32_t eecd;
   13324 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13325 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13326 	uint32_t nvm_dword = 0;
   13327 	uint8_t sig_byte = 0;
   13328 	int rv;
   13329 
   13330 	switch (sc->sc_type) {
   13331 	case WM_T_PCH_SPT:
   13332 	case WM_T_PCH_CNP:
   13333 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13334 		act_offset = ICH_NVM_SIG_WORD * 2;
   13335 
   13336 		/* Set bank to 0 in case flash read fails. */
   13337 		*bank = 0;
   13338 
   13339 		/* Check bank 0 */
   13340 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13341 		if (rv != 0)
   13342 			return rv;
   13343 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13344 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13345 			*bank = 0;
   13346 			return 0;
   13347 		}
   13348 
   13349 		/* Check bank 1 */
   13350 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   13351 		    &nvm_dword);
   13352 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13353 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13354 			*bank = 1;
   13355 			return 0;
   13356 		}
   13357 		aprint_error_dev(sc->sc_dev,
   13358 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13359 		return -1;
   13360 	case WM_T_ICH8:
   13361 	case WM_T_ICH9:
   13362 		eecd = CSR_READ(sc, WMREG_EECD);
   13363 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13364 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13365 			return 0;
   13366 		}
   13367 		/* FALLTHROUGH */
   13368 	default:
   13369 		/* Default to 0 */
   13370 		*bank = 0;
   13371 
   13372 		/* Check bank 0 */
   13373 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13374 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13375 			*bank = 0;
   13376 			return 0;
   13377 		}
   13378 
   13379 		/* Check bank 1 */
   13380 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13381 		    &sig_byte);
   13382 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13383 			*bank = 1;
   13384 			return 0;
   13385 		}
   13386 	}
   13387 
   13388 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13389 		device_xname(sc->sc_dev)));
   13390 	return -1;
   13391 }
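
          /*
           * The signature test used above, in one line: a bank is valid iff
           *
           *	(sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE
           *
           * Bank 0 is tried first and bank 1 only if bank 0 fails; on
           * SPT/CNP the signature byte is extracted from a dword read,
           * hence the ">> 8" above.
           */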
   13392 
   13393 /******************************************************************************
   13394  * This function does initial flash setup so that a new read/write/erase cycle
   13395  * can be started.
   13396  *
   13397  * sc - The pointer to the hw structure
   13398  ****************************************************************************/
   13399 static int32_t
   13400 wm_ich8_cycle_init(struct wm_softc *sc)
   13401 {
   13402 	uint16_t hsfsts;
   13403 	int32_t error = 1;
   13404 	int32_t i     = 0;
   13405 
   13406 	if (sc->sc_type >= WM_T_PCH_SPT)
   13407 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13408 	else
   13409 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13410 
    13411 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   13412 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13413 		return error;
   13414 
    13415 	/* Clear FCERR and DAEL in HW status by writing 1 to each */
   13417 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13418 
   13419 	if (sc->sc_type >= WM_T_PCH_SPT)
   13420 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13421 	else
   13422 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13423 
    13424 	/*
    13425 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13426 	 * check against in order to start a new cycle, or the FDONE bit
    13427 	 * should be changed in the hardware so that it reads 1 after a
    13428 	 * hardware reset, which could then be used to tell whether a
    13429 	 * cycle is in progress or has completed.  We should also have
    13430 	 * some software semaphore mechanism guarding FDONE or the
    13431 	 * cycle-in-progress bit, so that accesses by two threads are
    13432 	 * serialized and they don't start a cycle at the same time.
    13433 	 */
   13434 
   13435 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13436 		/*
   13437 		 * There is no cycle running at present, so we can start a
   13438 		 * cycle
   13439 		 */
   13440 
   13441 		/* Begin by setting Flash Cycle Done. */
   13442 		hsfsts |= HSFSTS_DONE;
   13443 		if (sc->sc_type >= WM_T_PCH_SPT)
   13444 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13445 			    hsfsts & 0xffffUL);
   13446 		else
   13447 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13448 		error = 0;
   13449 	} else {
   13450 		/*
    13451 		 * Otherwise poll for some time so the current cycle has a
   13452 		 * chance to end before giving up.
   13453 		 */
   13454 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13455 			if (sc->sc_type >= WM_T_PCH_SPT)
   13456 				hsfsts = ICH8_FLASH_READ32(sc,
   13457 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13458 			else
   13459 				hsfsts = ICH8_FLASH_READ16(sc,
   13460 				    ICH_FLASH_HSFSTS);
   13461 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13462 				error = 0;
   13463 				break;
   13464 			}
   13465 			delay(1);
   13466 		}
   13467 		if (error == 0) {
   13468 			/*
    13469 			 * The previous cycle ended within the timeout;
    13470 			 * now set the Flash Cycle Done.
   13471 			 */
   13472 			hsfsts |= HSFSTS_DONE;
   13473 			if (sc->sc_type >= WM_T_PCH_SPT)
   13474 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13475 				    hsfsts & 0xffffUL);
   13476 			else
   13477 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13478 				    hsfsts);
   13479 		}
   13480 	}
   13481 	return error;
   13482 }
   13483 
   13484 /******************************************************************************
   13485  * This function starts a flash cycle and waits for its completion
   13486  *
   13487  * sc - The pointer to the hw structure
   13488  ****************************************************************************/
   13489 static int32_t
   13490 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13491 {
   13492 	uint16_t hsflctl;
   13493 	uint16_t hsfsts;
   13494 	int32_t error = 1;
   13495 	uint32_t i = 0;
   13496 
   13497 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13498 	if (sc->sc_type >= WM_T_PCH_SPT)
   13499 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13500 	else
   13501 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13502 	hsflctl |= HSFCTL_GO;
   13503 	if (sc->sc_type >= WM_T_PCH_SPT)
   13504 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13505 		    (uint32_t)hsflctl << 16);
   13506 	else
   13507 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13508 
    13509 	/* Wait until the FDONE bit is set */
   13510 	do {
   13511 		if (sc->sc_type >= WM_T_PCH_SPT)
   13512 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13513 			    & 0xffffUL;
   13514 		else
   13515 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13516 		if (hsfsts & HSFSTS_DONE)
   13517 			break;
   13518 		delay(1);
   13519 		i++;
   13520 	} while (i < timeout);
    13521 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13522 		error = 0;
   13523 
   13524 	return error;
   13525 }
   13526 
   13527 /******************************************************************************
   13528  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13529  *
   13530  * sc - The pointer to the hw structure
   13531  * index - The index of the byte or word to read.
    13532  * size - Size of data to read: 1=byte, 2=word, 4=dword
   13533  * data - Pointer to the word to store the value read.
   13534  *****************************************************************************/
   13535 static int32_t
   13536 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13537     uint32_t size, uint32_t *data)
   13538 {
   13539 	uint16_t hsfsts;
   13540 	uint16_t hsflctl;
   13541 	uint32_t flash_linear_address;
   13542 	uint32_t flash_data = 0;
   13543 	int32_t error = 1;
   13544 	int32_t count = 0;
   13545 
    13546 	if (size < 1 || size > 4 || data == NULL ||
   13547 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13548 		return error;
   13549 
   13550 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13551 	    sc->sc_ich8_flash_base;
   13552 
   13553 	do {
   13554 		delay(1);
   13555 		/* Steps */
   13556 		error = wm_ich8_cycle_init(sc);
   13557 		if (error)
   13558 			break;
   13559 
   13560 		if (sc->sc_type >= WM_T_PCH_SPT)
   13561 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13562 			    >> 16;
   13563 		else
   13564 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13565 		/* The BCOUNT field holds the byte count minus one. */
   13566 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13567 		    & HSFCTL_BCOUNT_MASK;
   13568 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13569 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13570 			/*
    13571 			 * In SPT, this register is in LAN memory space, not
    13572 			 * flash. Therefore, only 32-bit access is supported.
   13573 			 */
   13574 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13575 			    (uint32_t)hsflctl << 16);
   13576 		} else
   13577 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13578 
   13579 		/*
    13580 		 * Write the last 24 bits of the index into the Flash Linear
    13581 		 * Address field of the Flash Address register.
   13582 		 */
    13583 		/* TODO: maybe check the index against the size of the flash */
   13584 
   13585 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13586 
   13587 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13588 
   13589 		/*
    13590 		 * If FCERR is set, clear it and retry the whole sequence
    13591 		 * a few more times before giving up; otherwise read in the
    13592 		 * Flash Data0 register, least significant byte first.
   13594 		 */
   13595 		if (error == 0) {
   13596 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13597 			if (size == 1)
   13598 				*data = (uint8_t)(flash_data & 0x000000FF);
   13599 			else if (size == 2)
   13600 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13601 			else if (size == 4)
   13602 				*data = (uint32_t)flash_data;
   13603 			break;
   13604 		} else {
   13605 			/*
   13606 			 * If we've gotten here, then things are probably
   13607 			 * completely hosed, but if the error condition is
   13608 			 * detected, it won't hurt to give it another try...
   13609 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13610 			 */
   13611 			if (sc->sc_type >= WM_T_PCH_SPT)
   13612 				hsfsts = ICH8_FLASH_READ32(sc,
   13613 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13614 			else
   13615 				hsfsts = ICH8_FLASH_READ16(sc,
   13616 				    ICH_FLASH_HSFSTS);
   13617 
   13618 			if (hsfsts & HSFSTS_ERR) {
   13619 				/* Repeat for some time before giving up. */
   13620 				continue;
   13621 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13622 				break;
   13623 		}
   13624 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13625 
   13626 	return error;
   13627 }
   13628 
   13629 /******************************************************************************
   13630  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13631  *
    13632  * sc - pointer to the wm_softc structure
   13633  * index - The index of the byte to read.
   13634  * data - Pointer to a byte to store the value read.
   13635  *****************************************************************************/
   13636 static int32_t
   13637 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13638 {
   13639 	int32_t status;
   13640 	uint32_t word = 0;
   13641 
   13642 	status = wm_read_ich8_data(sc, index, 1, &word);
   13643 	if (status == 0)
   13644 		*data = (uint8_t)word;
   13645 	else
   13646 		*data = 0;
   13647 
   13648 	return status;
   13649 }
   13650 
   13651 /******************************************************************************
   13652  * Reads a word from the NVM using the ICH8 flash access registers.
   13653  *
    13654  * sc - pointer to the wm_softc structure
   13655  * index - The starting byte index of the word to read.
   13656  * data - Pointer to a word to store the value read.
   13657  *****************************************************************************/
   13658 static int32_t
   13659 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13660 {
   13661 	int32_t status;
   13662 	uint32_t word = 0;
   13663 
   13664 	status = wm_read_ich8_data(sc, index, 2, &word);
   13665 	if (status == 0)
   13666 		*data = (uint16_t)word;
   13667 	else
   13668 		*data = 0;
   13669 
   13670 	return status;
   13671 }
   13672 
   13673 /******************************************************************************
   13674  * Reads a dword from the NVM using the ICH8 flash access registers.
   13675  *
    13676  * sc - pointer to the wm_softc structure
   13677  * index - The starting byte index of the word to read.
   13678  * data - Pointer to a word to store the value read.
   13679  *****************************************************************************/
   13680 static int32_t
   13681 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13682 {
   13683 	int32_t status;
   13684 
   13685 	status = wm_read_ich8_data(sc, index, 4, data);
   13686 	return status;
   13687 }
   13688 
   13689 /******************************************************************************
   13690  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13691  * register.
   13692  *
   13693  * sc - Struct containing variables accessed by shared code
   13694  * offset - offset of word in the EEPROM to read
   13695  * data - word read from the EEPROM
   13696  * words - number of words to read
   13697  *****************************************************************************/
   13698 static int
   13699 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13700 {
   13701 	int32_t	 rv = 0;
   13702 	uint32_t flash_bank = 0;
   13703 	uint32_t act_offset = 0;
   13704 	uint32_t bank_offset = 0;
   13705 	uint16_t word = 0;
   13706 	uint16_t i = 0;
   13707 
   13708 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13709 		device_xname(sc->sc_dev), __func__));
   13710 
   13711 	if (sc->nvm.acquire(sc) != 0)
   13712 		return -1;
   13713 
   13714 	/*
   13715 	 * We need to know which is the valid flash bank.  In the event
   13716 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13717 	 * managing flash_bank. So it cannot be trusted and needs
   13718 	 * to be updated with each read.
   13719 	 */
   13720 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13721 	if (rv) {
   13722 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13723 			device_xname(sc->sc_dev)));
   13724 		flash_bank = 0;
   13725 	}
   13726 
   13727 	/*
    13728 	 * Adjust the offset if we're on bank 1, accounting for the word
    13729 	 * size.
   13730 	 */
   13731 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13732 
   13733 	for (i = 0; i < words; i++) {
   13734 		/* The NVM part needs a byte offset, hence * 2 */
   13735 		act_offset = bank_offset + ((offset + i) * 2);
   13736 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13737 		if (rv) {
   13738 			aprint_error_dev(sc->sc_dev,
   13739 			    "%s: failed to read NVM\n", __func__);
   13740 			break;
   13741 		}
   13742 		data[i] = word;
   13743 	}
   13744 
   13745 	sc->nvm.release(sc);
   13746 	return rv;
   13747 }
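
          /*
           * Worked offset arithmetic for the loop above, with a
           * hypothetical bank size: if flash_bank = 1 and
           * sc_ich8_flash_bank_size = 0x400 words, then bank_offset =
           * 1 * (0x400 * 2) = 0x800 bytes, and NVM word 0x10 is fetched
           * from byte address 0x800 + (0x10 * 2) = 0x820.
           */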
   13748 
   13749 /******************************************************************************
   13750  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13751  * register.
   13752  *
   13753  * sc - Struct containing variables accessed by shared code
   13754  * offset - offset of word in the EEPROM to read
   13755  * data - word read from the EEPROM
   13756  * words - number of words to read
   13757  *****************************************************************************/
   13758 static int
   13759 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13760 {
   13761 	int32_t	 rv = 0;
   13762 	uint32_t flash_bank = 0;
   13763 	uint32_t act_offset = 0;
   13764 	uint32_t bank_offset = 0;
   13765 	uint32_t dword = 0;
   13766 	uint16_t i = 0;
   13767 
   13768 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13769 		device_xname(sc->sc_dev), __func__));
   13770 
   13771 	if (sc->nvm.acquire(sc) != 0)
   13772 		return -1;
   13773 
   13774 	/*
   13775 	 * We need to know which is the valid flash bank.  In the event
   13776 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13777 	 * managing flash_bank. So it cannot be trusted and needs
   13778 	 * to be updated with each read.
   13779 	 */
   13780 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13781 	if (rv) {
   13782 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13783 			device_xname(sc->sc_dev)));
   13784 		flash_bank = 0;
   13785 	}
   13786 
   13787 	/*
    13788 	 * Adjust the offset if we're on bank 1, accounting for the word
    13789 	 * size.
   13790 	 */
   13791 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13792 
   13793 	for (i = 0; i < words; i++) {
   13794 		/* The NVM part needs a byte offset, hence * 2 */
   13795 		act_offset = bank_offset + ((offset + i) * 2);
   13796 		/* but we must read dword aligned, so mask ... */
   13797 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13798 		if (rv) {
   13799 			aprint_error_dev(sc->sc_dev,
   13800 			    "%s: failed to read NVM\n", __func__);
   13801 			break;
   13802 		}
   13803 		/* ... and pick out low or high word */
   13804 		if ((act_offset & 0x2) == 0)
   13805 			data[i] = (uint16_t)(dword & 0xFFFF);
   13806 		else
   13807 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13808 	}
   13809 
   13810 	sc->nvm.release(sc);
   13811 	return rv;
   13812 }
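
          /*
           * Worked example of the dword alignment above: NVM word offset 3
           * maps to byte address 6, which "& ~0x3" rounds down to the dword
           * at byte 4; since (6 & 0x2) != 0 the result is the high word of
           * that dword.  Even word offsets take the low word.
           */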
   13813 
   13814 /* iNVM */
   13815 
   13816 static int
   13817 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13818 {
   13819 	int32_t	 rv = 0;
   13820 	uint32_t invm_dword;
   13821 	uint16_t i;
   13822 	uint8_t record_type, word_address;
   13823 
   13824 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13825 		device_xname(sc->sc_dev), __func__));
   13826 
   13827 	for (i = 0; i < INVM_SIZE; i++) {
   13828 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13829 		/* Get record type */
   13830 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13831 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13832 			break;
   13833 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13834 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13835 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13836 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13837 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13838 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13839 			if (word_address == address) {
   13840 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13841 				rv = 0;
   13842 				break;
   13843 			}
   13844 		}
   13845 	}
   13846 
   13847 	return rv;
   13848 }
   13849 
   13850 static int
   13851 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13852 {
   13853 	int rv = 0;
   13854 	int i;
   13855 
   13856 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13857 		device_xname(sc->sc_dev), __func__));
   13858 
   13859 	if (sc->nvm.acquire(sc) != 0)
   13860 		return -1;
   13861 
   13862 	for (i = 0; i < words; i++) {
   13863 		switch (offset + i) {
   13864 		case NVM_OFF_MACADDR:
   13865 		case NVM_OFF_MACADDR1:
   13866 		case NVM_OFF_MACADDR2:
   13867 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13868 			if (rv != 0) {
   13869 				data[i] = 0xffff;
   13870 				rv = -1;
   13871 			}
   13872 			break;
   13873 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13874 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13875 			if (rv != 0) {
   13876 				*data = INVM_DEFAULT_AL;
   13877 				rv = 0;
   13878 			}
   13879 			break;
   13880 		case NVM_OFF_CFG2:
   13881 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13882 			if (rv != 0) {
   13883 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13884 				rv = 0;
   13885 			}
   13886 			break;
   13887 		case NVM_OFF_CFG4:
   13888 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13889 			if (rv != 0) {
   13890 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13891 				rv = 0;
   13892 			}
   13893 			break;
   13894 		case NVM_OFF_LED_1_CFG:
   13895 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13896 			if (rv != 0) {
   13897 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13898 				rv = 0;
   13899 			}
   13900 			break;
   13901 		case NVM_OFF_LED_0_2_CFG:
   13902 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13903 			if (rv != 0) {
   13904 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13905 				rv = 0;
   13906 			}
   13907 			break;
   13908 		case NVM_OFF_ID_LED_SETTINGS:
   13909 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13910 			if (rv != 0) {
   13911 				*data = ID_LED_RESERVED_FFFF;
   13912 				rv = 0;
   13913 			}
   13914 			break;
   13915 		default:
   13916 			DPRINTF(sc, WM_DEBUG_NVM,
   13917 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13918 			*data = NVM_RESERVED_WORD;
   13919 			break;
   13920 		}
   13921 	}
   13922 
   13923 	sc->nvm.release(sc);
   13924 	return rv;
   13925 }
   13926 
    13927 /* Locking, NVM type detection, checksum validation, version and read */
   13928 
   13929 static int
   13930 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13931 {
   13932 	uint32_t eecd = 0;
   13933 
   13934 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13935 	    || sc->sc_type == WM_T_82583) {
   13936 		eecd = CSR_READ(sc, WMREG_EECD);
   13937 
   13938 		/* Isolate bits 15 & 16 */
   13939 		eecd = ((eecd >> 15) & 0x03);
   13940 
   13941 		/* If both bits are set, device is Flash type */
   13942 		if (eecd == 0x03)
   13943 			return 0;
   13944 	}
   13945 	return 1;
   13946 }
   13947 
   13948 static int
   13949 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13950 {
   13951 	uint32_t eec;
   13952 
   13953 	eec = CSR_READ(sc, WMREG_EEC);
   13954 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13955 		return 1;
   13956 
   13957 	return 0;
   13958 }
   13959 
   13960 /*
   13961  * wm_nvm_validate_checksum
   13962  *
   13963  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13964  */
   13965 static int
   13966 wm_nvm_validate_checksum(struct wm_softc *sc)
   13967 {
   13968 	uint16_t checksum;
   13969 	uint16_t eeprom_data;
   13970 #ifdef WM_DEBUG
   13971 	uint16_t csum_wordaddr, valid_checksum;
   13972 #endif
   13973 	int i;
   13974 
   13975 	checksum = 0;
   13976 
   13977 	/* Don't check for I211 */
   13978 	if (sc->sc_type == WM_T_I211)
   13979 		return 0;
   13980 
   13981 #ifdef WM_DEBUG
   13982 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13983 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13984 		csum_wordaddr = NVM_OFF_COMPAT;
   13985 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13986 	} else {
   13987 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13988 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13989 	}
   13990 
   13991 	/* Dump EEPROM image for debug */
   13992 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13993 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13994 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13995 		/* XXX PCH_SPT? */
   13996 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13997 		if ((eeprom_data & valid_checksum) == 0)
   13998 			DPRINTF(sc, WM_DEBUG_NVM,
   13999 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14000 				device_xname(sc->sc_dev), eeprom_data,
   14001 				    valid_checksum));
   14002 	}
   14003 
   14004 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14005 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14006 		for (i = 0; i < NVM_SIZE; i++) {
   14007 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14008 				printf("XXXX ");
   14009 			else
   14010 				printf("%04hx ", eeprom_data);
   14011 			if (i % 8 == 7)
   14012 				printf("\n");
   14013 		}
   14014 	}
   14015 
   14016 #endif /* WM_DEBUG */
   14017 
   14018 	for (i = 0; i < NVM_SIZE; i++) {
   14019 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14020 			return 1;
   14021 		checksum += eeprom_data;
   14022 	}
   14023 
   14024 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14025 #ifdef WM_DEBUG
   14026 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14027 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14028 #endif
   14029 	}
   14030 
   14031 	return 0;
   14032 }
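
          /*
           * The checksum arithmetic in miniature (hypothetical word
           * values): if words 0x00-0x3e sum to 0x1111, the vendor must
           * store NVM_CHECKSUM - 0x1111 = 0xa9a9 in word 0x3f so that the
           * 16-bit sum of all NVM_SIZE words equals NVM_CHECKSUM (0xbaba
           * on these parts).  Note that the function above only reports a
           * mismatch under WM_DEBUG and still returns success.
           */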
   14033 
   14034 static void
   14035 wm_nvm_version_invm(struct wm_softc *sc)
   14036 {
   14037 	uint32_t dword;
   14038 
   14039 	/*
    14040 	 * Linux's code to decode the version is very strange, so we don't
    14041 	 * follow that algorithm and just use word 61 as the datasheet says.
   14042 	 * Perhaps it's not perfect though...
   14043 	 *
   14044 	 * Example:
   14045 	 *
   14046 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14047 	 */
   14048 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14049 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14050 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14051 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14052 }
   14053 
   14054 static void
   14055 wm_nvm_version(struct wm_softc *sc)
   14056 {
   14057 	uint16_t major, minor, build, patch;
   14058 	uint16_t uid0, uid1;
   14059 	uint16_t nvm_data;
   14060 	uint16_t off;
   14061 	bool check_version = false;
   14062 	bool check_optionrom = false;
   14063 	bool have_build = false;
   14064 	bool have_uid = true;
   14065 
   14066 	/*
   14067 	 * Version format:
   14068 	 *
   14069 	 * XYYZ
   14070 	 * X0YZ
   14071 	 * X0YY
   14072 	 *
   14073 	 * Example:
   14074 	 *
   14075 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14076 	 *	82571	0x50a6	5.10.6?
   14077 	 *	82572	0x506a	5.6.10?
   14078 	 *	82572EI	0x5069	5.6.9?
   14079 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14080 	 *		0x2013	2.1.3?
   14081 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14082 	 * ICH8+82567	0x0040	0.4.0?
   14083 	 * ICH9+82566	0x1040	1.4.0?
   14084 	 *ICH10+82567	0x0043	0.4.3?
   14085 	 *  PCH+82577	0x00c1	0.12.1?
   14086 	 * PCH2+82579	0x00d3	0.13.3?
   14087 	 *		0x00d4	0.13.4?
   14088 	 *  LPT+I218	0x0023	0.2.3?
   14089 	 *  SPT+I219	0x0084	0.8.4?
   14090 	 *  CNP+I219	0x0054	0.5.4?
   14091 	 */
   14092 
   14093 	/*
   14094 	 * XXX
   14095 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    14096 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14097 	 */
   14098 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14099 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14100 		have_uid = false;
   14101 
   14102 	switch (sc->sc_type) {
   14103 	case WM_T_82571:
   14104 	case WM_T_82572:
   14105 	case WM_T_82574:
   14106 	case WM_T_82583:
   14107 		check_version = true;
   14108 		check_optionrom = true;
   14109 		have_build = true;
   14110 		break;
   14111 	case WM_T_ICH8:
   14112 	case WM_T_ICH9:
   14113 	case WM_T_ICH10:
   14114 	case WM_T_PCH:
   14115 	case WM_T_PCH2:
   14116 	case WM_T_PCH_LPT:
   14117 	case WM_T_PCH_SPT:
   14118 	case WM_T_PCH_CNP:
   14119 		check_version = true;
   14120 		have_build = true;
   14121 		have_uid = false;
   14122 		break;
   14123 	case WM_T_82575:
   14124 	case WM_T_82576:
   14125 	case WM_T_82580:
   14126 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14127 			check_version = true;
   14128 		break;
   14129 	case WM_T_I211:
   14130 		wm_nvm_version_invm(sc);
   14131 		have_uid = false;
   14132 		goto printver;
   14133 	case WM_T_I210:
   14134 		if (!wm_nvm_flash_presence_i210(sc)) {
   14135 			wm_nvm_version_invm(sc);
   14136 			have_uid = false;
   14137 			goto printver;
   14138 		}
   14139 		/* FALLTHROUGH */
   14140 	case WM_T_I350:
   14141 	case WM_T_I354:
   14142 		check_version = true;
   14143 		check_optionrom = true;
   14144 		break;
   14145 	default:
   14146 		return;
   14147 	}
   14148 	if (check_version
   14149 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14150 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14151 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14152 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14153 			build = nvm_data & NVM_BUILD_MASK;
   14154 			have_build = true;
   14155 		} else
   14156 			minor = nvm_data & 0x00ff;
   14157 
    14158 		/* Convert the BCD-encoded minor number to decimal */
   14159 		minor = (minor / 16) * 10 + (minor % 16);
   14160 		sc->sc_nvm_ver_major = major;
   14161 		sc->sc_nvm_ver_minor = minor;
   14162 
   14163 printver:
   14164 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14165 		    sc->sc_nvm_ver_minor);
   14166 		if (have_build) {
   14167 			sc->sc_nvm_ver_build = build;
   14168 			aprint_verbose(".%d", build);
   14169 		}
   14170 	}
   14171 
    14172 	/* Assume the Option ROM area is above NVM_SIZE */
   14173 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14174 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14175 		/* Option ROM Version */
   14176 		if ((off != 0x0000) && (off != 0xffff)) {
   14177 			int rv;
   14178 
   14179 			off += NVM_COMBO_VER_OFF;
   14180 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14181 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14182 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14183 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14184 				/* 16bits */
   14185 				major = uid0 >> 8;
   14186 				build = (uid0 << 8) | (uid1 >> 8);
   14187 				patch = uid1 & 0x00ff;
   14188 				aprint_verbose(", option ROM Version %d.%d.%d",
   14189 				    major, build, patch);
   14190 			}
   14191 		}
   14192 	}
   14193 
   14194 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14195 		aprint_verbose(", Image Unique ID %08x",
   14196 		    ((uint32_t)uid1 << 16) | uid0);
   14197 }
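
          /*
           * Decode example for the logic above, using the 82571 entry from
           * the table (0x50a2) and assuming the usual mask/shift values
           * from the register header: major = 0x5, minor = 0x0a, build =
           * 0x2, and the BCD-style conversion turns the minor into
           * (0x0a / 16) * 10 + (0x0a % 16) = 10, printing "5.10.2".
           */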
   14198 
   14199 /*
   14200  * wm_nvm_read:
   14201  *
   14202  *	Read data from the serial EEPROM.
   14203  */
   14204 static int
   14205 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14206 {
   14207 	int rv;
   14208 
   14209 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14210 		device_xname(sc->sc_dev), __func__));
   14211 
   14212 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14213 		return -1;
   14214 
   14215 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14216 
   14217 	return rv;
   14218 }
   14219 
   14220 /*
   14221  * Hardware semaphores.
    14222  * Very complex...
   14223  */
   14224 
   14225 static int
   14226 wm_get_null(struct wm_softc *sc)
   14227 {
   14228 
   14229 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14230 		device_xname(sc->sc_dev), __func__));
   14231 	return 0;
   14232 }
   14233 
   14234 static void
   14235 wm_put_null(struct wm_softc *sc)
   14236 {
   14237 
   14238 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14239 		device_xname(sc->sc_dev), __func__));
   14240 	return;
   14241 }
   14242 
   14243 static int
   14244 wm_get_eecd(struct wm_softc *sc)
   14245 {
   14246 	uint32_t reg;
   14247 	int x;
   14248 
   14249 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14250 		device_xname(sc->sc_dev), __func__));
   14251 
   14252 	reg = CSR_READ(sc, WMREG_EECD);
   14253 
   14254 	/* Request EEPROM access. */
   14255 	reg |= EECD_EE_REQ;
   14256 	CSR_WRITE(sc, WMREG_EECD, reg);
   14257 
   14258 	/* ..and wait for it to be granted. */
   14259 	for (x = 0; x < 1000; x++) {
   14260 		reg = CSR_READ(sc, WMREG_EECD);
   14261 		if (reg & EECD_EE_GNT)
   14262 			break;
   14263 		delay(5);
   14264 	}
   14265 	if ((reg & EECD_EE_GNT) == 0) {
   14266 		aprint_error_dev(sc->sc_dev,
   14267 		    "could not acquire EEPROM GNT\n");
   14268 		reg &= ~EECD_EE_REQ;
   14269 		CSR_WRITE(sc, WMREG_EECD, reg);
   14270 		return -1;
   14271 	}
   14272 
   14273 	return 0;
   14274 }
   14275 
   14276 static void
   14277 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14278 {
   14279 
   14280 	*eecd |= EECD_SK;
   14281 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14282 	CSR_WRITE_FLUSH(sc);
   14283 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14284 		delay(1);
   14285 	else
   14286 		delay(50);
   14287 }
   14288 
   14289 static void
   14290 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14291 {
   14292 
   14293 	*eecd &= ~EECD_SK;
   14294 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14295 	CSR_WRITE_FLUSH(sc);
   14296 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14297 		delay(1);
   14298 	else
   14299 		delay(50);
   14300 }
   14301 
   14302 static void
   14303 wm_put_eecd(struct wm_softc *sc)
   14304 {
   14305 	uint32_t reg;
   14306 
   14307 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14308 		device_xname(sc->sc_dev), __func__));
   14309 
   14310 	/* Stop nvm */
   14311 	reg = CSR_READ(sc, WMREG_EECD);
   14312 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14313 		/* Pull CS high */
   14314 		reg |= EECD_CS;
   14315 		wm_nvm_eec_clock_lower(sc, &reg);
   14316 	} else {
   14317 		/* CS on Microwire is active-high */
   14318 		reg &= ~(EECD_CS | EECD_DI);
   14319 		CSR_WRITE(sc, WMREG_EECD, reg);
   14320 		wm_nvm_eec_clock_raise(sc, &reg);
   14321 		wm_nvm_eec_clock_lower(sc, &reg);
   14322 	}
   14323 
   14324 	reg = CSR_READ(sc, WMREG_EECD);
   14325 	reg &= ~EECD_EE_REQ;
   14326 	CSR_WRITE(sc, WMREG_EECD, reg);
   14327 
   14328 	return;
   14329 }
   14330 
   14331 /*
   14332  * Get hardware semaphore.
   14333  * Same as e1000_get_hw_semaphore_generic()
   14334  */
   14335 static int
   14336 wm_get_swsm_semaphore(struct wm_softc *sc)
   14337 {
   14338 	int32_t timeout;
   14339 	uint32_t swsm;
   14340 
   14341 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14342 		device_xname(sc->sc_dev), __func__));
   14343 	KASSERT(sc->sc_nvm_wordsize > 0);
   14344 
   14345 retry:
   14346 	/* Get the SW semaphore. */
   14347 	timeout = sc->sc_nvm_wordsize + 1;
   14348 	while (timeout) {
   14349 		swsm = CSR_READ(sc, WMREG_SWSM);
   14350 
   14351 		if ((swsm & SWSM_SMBI) == 0)
   14352 			break;
   14353 
   14354 		delay(50);
   14355 		timeout--;
   14356 	}
   14357 
   14358 	if (timeout == 0) {
   14359 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14360 			/*
   14361 			 * In rare circumstances, the SW semaphore may already
   14362 			 * be held unintentionally. Clear the semaphore once
   14363 			 * before giving up.
   14364 			 */
   14365 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14366 			wm_put_swsm_semaphore(sc);
   14367 			goto retry;
   14368 		}
   14369 		aprint_error_dev(sc->sc_dev,
   14370 		    "could not acquire SWSM SMBI\n");
   14371 		return 1;
   14372 	}
   14373 
   14374 	/* Get the FW semaphore. */
   14375 	timeout = sc->sc_nvm_wordsize + 1;
   14376 	while (timeout) {
   14377 		swsm = CSR_READ(sc, WMREG_SWSM);
   14378 		swsm |= SWSM_SWESMBI;
   14379 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14380 		/* If we managed to set the bit we got the semaphore. */
   14381 		swsm = CSR_READ(sc, WMREG_SWSM);
   14382 		if (swsm & SWSM_SWESMBI)
   14383 			break;
   14384 
   14385 		delay(50);
   14386 		timeout--;
   14387 	}
   14388 
   14389 	if (timeout == 0) {
   14390 		aprint_error_dev(sc->sc_dev,
   14391 		    "could not acquire SWSM SWESMBI\n");
   14392 		/* Release semaphores */
   14393 		wm_put_swsm_semaphore(sc);
   14394 		return 1;
   14395 	}
   14396 	return 0;
   14397 }
   14398 
   14399 /*
   14400  * Put hardware semaphore.
   14401  * Same as e1000_put_hw_semaphore_generic()
   14402  */
   14403 static void
   14404 wm_put_swsm_semaphore(struct wm_softc *sc)
   14405 {
   14406 	uint32_t swsm;
   14407 
   14408 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14409 		device_xname(sc->sc_dev), __func__));
   14410 
   14411 	swsm = CSR_READ(sc, WMREG_SWSM);
   14412 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14413 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14414 }
   14415 
   14416 /*
   14417  * Get SW/FW semaphore.
   14418  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14419  */
   14420 static int
   14421 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14422 {
   14423 	uint32_t swfw_sync;
   14424 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14425 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14426 	int timeout;
   14427 
   14428 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14429 		device_xname(sc->sc_dev), __func__));
   14430 
   14431 	if (sc->sc_type == WM_T_80003)
   14432 		timeout = 50;
   14433 	else
   14434 		timeout = 200;
   14435 
   14436 	while (timeout) {
   14437 		if (wm_get_swsm_semaphore(sc)) {
   14438 			aprint_error_dev(sc->sc_dev,
   14439 			    "%s: failed to get semaphore\n",
   14440 			    __func__);
   14441 			return 1;
   14442 		}
   14443 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14444 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14445 			swfw_sync |= swmask;
   14446 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14447 			wm_put_swsm_semaphore(sc);
   14448 			return 0;
   14449 		}
   14450 		wm_put_swsm_semaphore(sc);
   14451 		delay(5000);
   14452 		timeout--;
   14453 	}
   14454 	device_printf(sc->sc_dev,
   14455 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14456 	    mask, swfw_sync);
   14457 	return 1;
   14458 }
   14459 
   14460 static void
   14461 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14462 {
   14463 	uint32_t swfw_sync;
   14464 
   14465 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14466 		device_xname(sc->sc_dev), __func__));
   14467 
   14468 	while (wm_get_swsm_semaphore(sc) != 0)
   14469 		continue;
   14470 
   14471 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14472 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14473 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14474 
   14475 	wm_put_swsm_semaphore(sc);
   14476 }
   14477 
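/*
 * Get the NVM resource on 80003 class devices: the SW/FW EEPROM
 * semaphore and, if the EECD lock is in use, the EECD lock as well.
 */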
   14478 static int
   14479 wm_get_nvm_80003(struct wm_softc *sc)
   14480 {
   14481 	int rv;
   14482 
   14483 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14484 		device_xname(sc->sc_dev), __func__));
   14485 
   14486 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14487 		aprint_error_dev(sc->sc_dev,
   14488 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14489 		return rv;
   14490 	}
   14491 
   14492 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14493 	    && (rv = wm_get_eecd(sc)) != 0) {
   14494 		aprint_error_dev(sc->sc_dev,
   14495 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14496 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14497 		return rv;
   14498 	}
   14499 
   14500 	return 0;
   14501 }
   14502 
   14503 static void
   14504 wm_put_nvm_80003(struct wm_softc *sc)
   14505 {
   14506 
   14507 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14508 		device_xname(sc->sc_dev), __func__));
   14509 
   14510 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14511 		wm_put_eecd(sc);
   14512 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14513 }
   14514 
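/*
 * Get the NVM resource on 8257[1-4] class devices: the SWSM semaphore
 * always, plus the EECD lock on parts other than the 82573.
 */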
   14515 static int
   14516 wm_get_nvm_82571(struct wm_softc *sc)
   14517 {
   14518 	int rv;
   14519 
   14520 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14521 		device_xname(sc->sc_dev), __func__));
   14522 
   14523 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14524 		return rv;
   14525 
   14526 	switch (sc->sc_type) {
   14527 	case WM_T_82573:
   14528 		break;
   14529 	default:
   14530 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14531 			rv = wm_get_eecd(sc);
   14532 		break;
   14533 	}
   14534 
   14535 	if (rv != 0) {
   14536 		aprint_error_dev(sc->sc_dev,
   14537 		    "%s: failed to get semaphore\n",
   14538 		    __func__);
   14539 		wm_put_swsm_semaphore(sc);
   14540 	}
   14541 
   14542 	return rv;
   14543 }
   14544 
   14545 static void
   14546 wm_put_nvm_82571(struct wm_softc *sc)
   14547 {
   14548 
   14549 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14550 		device_xname(sc->sc_dev), __func__));
   14551 
   14552 	switch (sc->sc_type) {
   14553 	case WM_T_82573:
   14554 		break;
   14555 	default:
   14556 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14557 			wm_put_eecd(sc);
   14558 		break;
   14559 	}
   14560 
   14561 	wm_put_swsm_semaphore(sc);
   14562 }
   14563 
   14564 static int
   14565 wm_get_phy_82575(struct wm_softc *sc)
   14566 {
   14567 
   14568 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14569 		device_xname(sc->sc_dev), __func__));
   14570 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14571 }
   14572 
   14573 static void
   14574 wm_put_phy_82575(struct wm_softc *sc)
   14575 {
   14576 
   14577 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14578 		device_xname(sc->sc_dev), __func__));
   14579 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14580 }
   14581 
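/*
 * Get the SW/FW/HW semaphore (ICH8 and newer).  Take the PHY mutex
 * (which covers both PHY and NVM here), then set the MDIO software
 * ownership bit in EXTCNFCTR and poll until the hardware confirms it.
 */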
   14582 static int
   14583 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14584 {
   14585 	uint32_t ext_ctrl;
    14586 	int timeout;
   14587 
   14588 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14589 		device_xname(sc->sc_dev), __func__));
   14590 
   14591 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14592 	for (timeout = 0; timeout < 200; timeout++) {
   14593 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14594 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14595 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14596 
   14597 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14598 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14599 			return 0;
   14600 		delay(5000);
   14601 	}
   14602 	device_printf(sc->sc_dev,
   14603 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14604 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14605 	return 1;
   14606 }
   14607 
   14608 static void
   14609 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14610 {
   14611 	uint32_t ext_ctrl;
   14612 
   14613 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14614 		device_xname(sc->sc_dev), __func__));
   14615 
   14616 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14617 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14618 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14619 
   14620 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14621 }
   14622 
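/*
 * Get the software flag used on ICH8/PCH class devices: wait for any
 * other owner to release EXTCNFCTR_MDIO_SW_OWNERSHIP, then set the bit
 * and read it back to confirm that the hardware accepted the request.
 */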
   14623 static int
   14624 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14625 {
   14626 	uint32_t ext_ctrl;
   14627 	int timeout;
   14628 
   14629 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14630 		device_xname(sc->sc_dev), __func__));
   14631 	mutex_enter(sc->sc_ich_phymtx);
   14632 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14633 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14634 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14635 			break;
   14636 		delay(1000);
   14637 	}
   14638 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14639 		device_printf(sc->sc_dev,
   14640 		    "SW has already locked the resource\n");
   14641 		goto out;
   14642 	}
   14643 
   14644 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14645 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14646 	for (timeout = 0; timeout < 1000; timeout++) {
   14647 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14648 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14649 			break;
   14650 		delay(1000);
   14651 	}
   14652 	if (timeout >= 1000) {
   14653 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14654 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14655 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14656 		goto out;
   14657 	}
   14658 	return 0;
   14659 
   14660 out:
   14661 	mutex_exit(sc->sc_ich_phymtx);
   14662 	return 1;
   14663 }
   14664 
   14665 static void
   14666 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14667 {
   14668 	uint32_t ext_ctrl;
   14669 
   14670 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14671 		device_xname(sc->sc_dev), __func__));
   14672 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14673 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14674 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14675 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14676 	} else {
   14677 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14678 	}
   14679 
   14680 	mutex_exit(sc->sc_ich_phymtx);
   14681 }
   14682 
   14683 static int
   14684 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14685 {
   14686 
   14687 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14688 		device_xname(sc->sc_dev), __func__));
   14689 	mutex_enter(sc->sc_ich_nvmmtx);
   14690 
   14691 	return 0;
   14692 }
   14693 
   14694 static void
   14695 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14696 {
   14697 
   14698 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14699 		device_xname(sc->sc_dev), __func__));
   14700 	mutex_exit(sc->sc_ich_nvmmtx);
   14701 }
   14702 
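/*
 * Get the 82573 hardware semaphore: keep setting the MDIO software
 * ownership bit in EXTCNFCTR until it sticks or the ownership timeout
 * expires.
 */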
   14703 static int
   14704 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14705 {
   14706 	int i = 0;
   14707 	uint32_t reg;
   14708 
   14709 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14710 		device_xname(sc->sc_dev), __func__));
   14711 
   14712 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14713 	do {
   14714 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14715 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14716 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14717 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14718 			break;
   14719 		delay(2*1000);
   14720 		i++;
   14721 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14722 
   14723 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14724 		wm_put_hw_semaphore_82573(sc);
   14725 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14726 		    device_xname(sc->sc_dev));
   14727 		return -1;
   14728 	}
   14729 
   14730 	return 0;
   14731 }
   14732 
   14733 static void
   14734 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14735 {
   14736 	uint32_t reg;
   14737 
   14738 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14739 		device_xname(sc->sc_dev), __func__));
   14740 
   14741 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14742 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14743 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14744 }
   14745 
   14746 /*
   14747  * Management mode and power management related subroutines.
   14748  * BMC, AMT, suspend/resume and EEE.
   14749  */
   14750 
   14751 #ifdef WM_WOL
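/*
 * Check whether the management firmware is running; dispatch on the
 * MAC type since each family reports the management mode differently.
 */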
   14752 static int
   14753 wm_check_mng_mode(struct wm_softc *sc)
   14754 {
   14755 	int rv;
   14756 
   14757 	switch (sc->sc_type) {
   14758 	case WM_T_ICH8:
   14759 	case WM_T_ICH9:
   14760 	case WM_T_ICH10:
   14761 	case WM_T_PCH:
   14762 	case WM_T_PCH2:
   14763 	case WM_T_PCH_LPT:
   14764 	case WM_T_PCH_SPT:
   14765 	case WM_T_PCH_CNP:
   14766 		rv = wm_check_mng_mode_ich8lan(sc);
   14767 		break;
   14768 	case WM_T_82574:
   14769 	case WM_T_82583:
   14770 		rv = wm_check_mng_mode_82574(sc);
   14771 		break;
   14772 	case WM_T_82571:
   14773 	case WM_T_82572:
   14774 	case WM_T_82573:
   14775 	case WM_T_80003:
   14776 		rv = wm_check_mng_mode_generic(sc);
   14777 		break;
   14778 	default:
    14779 		/* Nothing to do */
   14780 		rv = 0;
   14781 		break;
   14782 	}
   14783 
   14784 	return rv;
   14785 }
   14786 
   14787 static int
   14788 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14789 {
   14790 	uint32_t fwsm;
   14791 
   14792 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14793 
   14794 	if (((fwsm & FWSM_FW_VALID) != 0)
   14795 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14796 		return 1;
   14797 
   14798 	return 0;
   14799 }
   14800 
   14801 static int
   14802 wm_check_mng_mode_82574(struct wm_softc *sc)
   14803 {
   14804 	uint16_t data;
   14805 
   14806 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14807 
   14808 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14809 		return 1;
   14810 
   14811 	return 0;
   14812 }
   14813 
   14814 static int
   14815 wm_check_mng_mode_generic(struct wm_softc *sc)
   14816 {
   14817 	uint32_t fwsm;
   14818 
   14819 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14820 
   14821 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14822 		return 1;
   14823 
   14824 	return 0;
   14825 }
   14826 #endif /* WM_WOL */
   14827 
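/*
 * Decide whether management pass-through should be enabled.  Require
 * ASF firmware and TCO receive enabled in MANC, then check that the
 * firmware mode (or, on 82574/82583, the NVM CFG2 word) selects
 * pass-through, or failing that, that SMBus is enabled with ASF
 * disabled.
 */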
   14828 static int
   14829 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14830 {
   14831 	uint32_t manc, fwsm, factps;
   14832 
   14833 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14834 		return 0;
   14835 
   14836 	manc = CSR_READ(sc, WMREG_MANC);
   14837 
   14838 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14839 		device_xname(sc->sc_dev), manc));
   14840 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14841 		return 0;
   14842 
   14843 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14844 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14845 		factps = CSR_READ(sc, WMREG_FACTPS);
   14846 		if (((factps & FACTPS_MNGCG) == 0)
   14847 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14848 			return 1;
    14849 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14850 		uint16_t data;
   14851 
   14852 		factps = CSR_READ(sc, WMREG_FACTPS);
   14853 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14854 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14855 			device_xname(sc->sc_dev), factps, data));
   14856 		if (((factps & FACTPS_MNGCG) == 0)
   14857 		    && ((data & NVM_CFG2_MNGM_MASK)
   14858 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14859 			return 1;
   14860 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14861 	    && ((manc & MANC_ASF_EN) == 0))
   14862 		return 1;
   14863 
   14864 	return 0;
   14865 }
   14866 
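/*
 * Check whether the firmware is currently blocking PHY resets: poll
 * FWSM_RSPCIPHY on ICH/PCH parts, check MANC_BLK_PHY_RST_ON_IDE on
 * 8257x/80003 parts.
 */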
   14867 static bool
   14868 wm_phy_resetisblocked(struct wm_softc *sc)
   14869 {
   14870 	bool blocked = false;
   14871 	uint32_t reg;
   14872 	int i = 0;
   14873 
   14874 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14875 		device_xname(sc->sc_dev), __func__));
   14876 
   14877 	switch (sc->sc_type) {
   14878 	case WM_T_ICH8:
   14879 	case WM_T_ICH9:
   14880 	case WM_T_ICH10:
   14881 	case WM_T_PCH:
   14882 	case WM_T_PCH2:
   14883 	case WM_T_PCH_LPT:
   14884 	case WM_T_PCH_SPT:
   14885 	case WM_T_PCH_CNP:
   14886 		do {
   14887 			reg = CSR_READ(sc, WMREG_FWSM);
   14888 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14889 				blocked = true;
   14890 				delay(10*1000);
   14891 				continue;
   14892 			}
   14893 			blocked = false;
   14894 		} while (blocked && (i++ < 30));
   14895 		return blocked;
   14896 		break;
   14897 	case WM_T_82571:
   14898 	case WM_T_82572:
   14899 	case WM_T_82573:
   14900 	case WM_T_82574:
   14901 	case WM_T_82583:
   14902 	case WM_T_80003:
   14903 		reg = CSR_READ(sc, WMREG_MANC);
   14904 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14905 			return true;
   14906 		else
   14907 			return false;
   14908 		break;
   14909 	default:
   14910 		/* No problem */
   14911 		break;
   14912 	}
   14913 
   14914 	return false;
   14915 }
   14916 
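/*
 * Tell the firmware that the driver has taken over the device: set
 * SWSM_DRV_LOAD on 82573, CTRL_EXT_DRV_LOAD on other >= 82571 devices.
 * wm_release_hw_control() clears the same bit.
 */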
   14917 static void
   14918 wm_get_hw_control(struct wm_softc *sc)
   14919 {
   14920 	uint32_t reg;
   14921 
   14922 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14923 		device_xname(sc->sc_dev), __func__));
   14924 
   14925 	if (sc->sc_type == WM_T_82573) {
   14926 		reg = CSR_READ(sc, WMREG_SWSM);
   14927 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14928 	} else if (sc->sc_type >= WM_T_82571) {
   14929 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14930 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14931 	}
   14932 }
   14933 
   14934 static void
   14935 wm_release_hw_control(struct wm_softc *sc)
   14936 {
   14937 	uint32_t reg;
   14938 
   14939 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14940 		device_xname(sc->sc_dev), __func__));
   14941 
   14942 	if (sc->sc_type == WM_T_82573) {
   14943 		reg = CSR_READ(sc, WMREG_SWSM);
   14944 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14945 	} else if (sc->sc_type >= WM_T_82571) {
   14946 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14947 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14948 	}
   14949 }
   14950 
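/*
 * Gate or ungate automatic PHY configuration by hardware; this is only
 * meaningful on PCH2 (82579) and newer devices.
 */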
   14951 static void
   14952 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14953 {
   14954 	uint32_t reg;
   14955 
   14956 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14957 		device_xname(sc->sc_dev), __func__));
   14958 
   14959 	if (sc->sc_type < WM_T_PCH2)
   14960 		return;
   14961 
   14962 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14963 
   14964 	if (gate)
   14965 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14966 	else
   14967 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14968 
   14969 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14970 }
   14971 
   14972 static int
   14973 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14974 {
   14975 	uint32_t fwsm, reg;
   14976 	int rv = 0;
   14977 
   14978 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14979 		device_xname(sc->sc_dev), __func__));
   14980 
   14981 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14982 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14983 
   14984 	/* Disable ULP */
   14985 	wm_ulp_disable(sc);
   14986 
   14987 	/* Acquire PHY semaphore */
   14988 	rv = sc->phy.acquire(sc);
   14989 	if (rv != 0) {
   14990 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   14991 		device_xname(sc->sc_dev), __func__));
   14992 		return -1;
   14993 	}
   14994 
   14995 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14996 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14997 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14998 	 */
   14999 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15000 	switch (sc->sc_type) {
   15001 	case WM_T_PCH_LPT:
   15002 	case WM_T_PCH_SPT:
   15003 	case WM_T_PCH_CNP:
   15004 		if (wm_phy_is_accessible_pchlan(sc))
   15005 			break;
   15006 
   15007 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15008 		 * forcing MAC to SMBus mode first.
   15009 		 */
   15010 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15011 		reg |= CTRL_EXT_FORCE_SMBUS;
   15012 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15013 #if 0
   15014 		/* XXX Isn't this required??? */
   15015 		CSR_WRITE_FLUSH(sc);
   15016 #endif
   15017 		/* Wait 50 milliseconds for MAC to finish any retries
   15018 		 * that it might be trying to perform from previous
   15019 		 * attempts to acknowledge any phy read requests.
   15020 		 */
   15021 		delay(50 * 1000);
   15022 		/* FALLTHROUGH */
   15023 	case WM_T_PCH2:
   15024 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15025 			break;
   15026 		/* FALLTHROUGH */
   15027 	case WM_T_PCH:
   15028 		if (sc->sc_type == WM_T_PCH)
   15029 			if ((fwsm & FWSM_FW_VALID) != 0)
   15030 				break;
   15031 
   15032 		if (wm_phy_resetisblocked(sc) == true) {
   15033 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15034 			break;
   15035 		}
   15036 
   15037 		/* Toggle LANPHYPC Value bit */
   15038 		wm_toggle_lanphypc_pch_lpt(sc);
   15039 
   15040 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15041 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15042 				break;
   15043 
   15044 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15045 			 * so ensure that the MAC is also out of SMBus mode
   15046 			 */
   15047 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15048 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15049 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15050 
   15051 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15052 				break;
   15053 			rv = -1;
   15054 		}
   15055 		break;
   15056 	default:
   15057 		break;
   15058 	}
   15059 
   15060 	/* Release semaphore */
   15061 	sc->phy.release(sc);
   15062 
   15063 	if (rv == 0) {
   15064 		/* Check to see if able to reset PHY.  Print error if not */
   15065 		if (wm_phy_resetisblocked(sc)) {
   15066 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15067 			goto out;
   15068 		}
   15069 
   15070 		/* Reset the PHY before any access to it.  Doing so, ensures
   15071 		 * that the PHY is in a known good state before we read/write
   15072 		 * PHY registers.  The generic reset is sufficient here,
   15073 		 * because we haven't determined the PHY type yet.
   15074 		 */
   15075 		if (wm_reset_phy(sc) != 0)
   15076 			goto out;
   15077 
   15078 		/* On a successful reset, possibly need to wait for the PHY
   15079 		 * to quiesce to an accessible state before returning control
   15080 		 * to the calling function.  If the PHY does not quiesce, then
   15081 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    15082 		 * the PHY is in.
   15083 		 */
   15084 		if (wm_phy_resetisblocked(sc))
   15085 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15086 	}
   15087 
   15088 out:
   15089 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15090 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15091 		delay(10*1000);
   15092 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15093 	}
   15094 
   15095 	return 0;
   15096 }
   15097 
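/*
 * Set up manageability: disable the hardware's interception of ARP
 * and, on >= 82571, forward management packets (ports 623/624) to the
 * host.
 */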
   15098 static void
   15099 wm_init_manageability(struct wm_softc *sc)
   15100 {
   15101 
   15102 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15103 		device_xname(sc->sc_dev), __func__));
   15104 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15105 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15106 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15107 
   15108 		/* Disable hardware interception of ARP */
   15109 		manc &= ~MANC_ARP_EN;
   15110 
   15111 		/* Enable receiving management packets to the host */
   15112 		if (sc->sc_type >= WM_T_82571) {
   15113 			manc |= MANC_EN_MNG2HOST;
   15114 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15115 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15116 		}
   15117 
   15118 		CSR_WRITE(sc, WMREG_MANC, manc);
   15119 	}
   15120 }
   15121 
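/*
 * Undo wm_init_manageability() on detach/suspend: hand ARP back to the
 * firmware and stop forwarding management packets to the host.
 */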
   15122 static void
   15123 wm_release_manageability(struct wm_softc *sc)
   15124 {
   15125 
   15126 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15127 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15128 
   15129 		manc |= MANC_ARP_EN;
   15130 		if (sc->sc_type >= WM_T_82571)
   15131 			manc &= ~MANC_EN_MNG2HOST;
   15132 
   15133 		CSR_WRITE(sc, WMREG_MANC, manc);
   15134 	}
   15135 }
   15136 
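/*
 * Record wakeup-related capabilities in sc_flags: AMT, a valid ARC
 * subsystem, ASF firmware presence and manageability, all derived from
 * the MAC type and the FWSM register.
 */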
   15137 static void
   15138 wm_get_wakeup(struct wm_softc *sc)
   15139 {
   15140 
   15141 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15142 	switch (sc->sc_type) {
   15143 	case WM_T_82573:
   15144 	case WM_T_82583:
   15145 		sc->sc_flags |= WM_F_HAS_AMT;
   15146 		/* FALLTHROUGH */
   15147 	case WM_T_80003:
   15148 	case WM_T_82575:
   15149 	case WM_T_82576:
   15150 	case WM_T_82580:
   15151 	case WM_T_I350:
   15152 	case WM_T_I354:
   15153 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15154 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15155 		/* FALLTHROUGH */
   15156 	case WM_T_82541:
   15157 	case WM_T_82541_2:
   15158 	case WM_T_82547:
   15159 	case WM_T_82547_2:
   15160 	case WM_T_82571:
   15161 	case WM_T_82572:
   15162 	case WM_T_82574:
   15163 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15164 		break;
   15165 	case WM_T_ICH8:
   15166 	case WM_T_ICH9:
   15167 	case WM_T_ICH10:
   15168 	case WM_T_PCH:
   15169 	case WM_T_PCH2:
   15170 	case WM_T_PCH_LPT:
   15171 	case WM_T_PCH_SPT:
   15172 	case WM_T_PCH_CNP:
   15173 		sc->sc_flags |= WM_F_HAS_AMT;
   15174 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15175 		break;
   15176 	default:
   15177 		break;
   15178 	}
   15179 
   15180 	/* 1: HAS_MANAGE */
   15181 	if (wm_enable_mng_pass_thru(sc) != 0)
   15182 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15183 
    15184 	/*
    15185 	 * Note that the WOL flag is set after the EEPROM settings have
    15186 	 * been reset.
    15187 	 */
   15188 }
   15189 
   15190 /*
   15191  * Unconfigure Ultra Low Power mode.
   15192  * Only for I217 and newer (see below).
   15193  */
   15194 static int
   15195 wm_ulp_disable(struct wm_softc *sc)
   15196 {
   15197 	uint32_t reg;
   15198 	uint16_t phyreg;
   15199 	int i = 0, rv = 0;
   15200 
   15201 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15202 		device_xname(sc->sc_dev), __func__));
   15203 	/* Exclude old devices */
   15204 	if ((sc->sc_type < WM_T_PCH_LPT)
   15205 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15206 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15207 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15208 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15209 		return 0;
   15210 
   15211 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15212 		/* Request ME un-configure ULP mode in the PHY */
   15213 		reg = CSR_READ(sc, WMREG_H2ME);
   15214 		reg &= ~H2ME_ULP;
   15215 		reg |= H2ME_ENFORCE_SETTINGS;
   15216 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15217 
   15218 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15219 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15220 			if (i++ == 30) {
   15221 				device_printf(sc->sc_dev, "%s timed out\n",
   15222 				    __func__);
   15223 				return -1;
   15224 			}
   15225 			delay(10 * 1000);
   15226 		}
   15227 		reg = CSR_READ(sc, WMREG_H2ME);
   15228 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15229 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15230 
   15231 		return 0;
   15232 	}
   15233 
   15234 	/* Acquire semaphore */
   15235 	rv = sc->phy.acquire(sc);
   15236 	if (rv != 0) {
   15237 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15238 		device_xname(sc->sc_dev), __func__));
   15239 		return -1;
   15240 	}
   15241 
   15242 	/* Toggle LANPHYPC */
   15243 	wm_toggle_lanphypc_pch_lpt(sc);
   15244 
   15245 	/* Unforce SMBus mode in PHY */
   15246 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15247 	if (rv != 0) {
   15248 		uint32_t reg2;
   15249 
   15250 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15251 			__func__);
   15252 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15253 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15254 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15255 		delay(50 * 1000);
   15256 
   15257 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15258 		    &phyreg);
   15259 		if (rv != 0)
   15260 			goto release;
   15261 	}
   15262 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15263 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15264 
   15265 	/* Unforce SMBus mode in MAC */
   15266 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15267 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15268 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15269 
   15270 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15271 	if (rv != 0)
   15272 		goto release;
   15273 	phyreg |= HV_PM_CTRL_K1_ENA;
   15274 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15275 
   15276 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15277 		&phyreg);
   15278 	if (rv != 0)
   15279 		goto release;
   15280 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15281 	    | I218_ULP_CONFIG1_STICKY_ULP
   15282 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15283 	    | I218_ULP_CONFIG1_WOL_HOST
   15284 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15285 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15286 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15287 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15288 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15289 	phyreg |= I218_ULP_CONFIG1_START;
   15290 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15291 
   15292 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15293 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15294 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15295 
   15296 release:
   15297 	/* Release semaphore */
   15298 	sc->phy.release(sc);
   15299 	wm_gmii_reset(sc);
   15300 	delay(50 * 1000);
   15301 
   15302 	return rv;
   15303 }
   15304 
   15305 /* WOL in the newer chipset interfaces (pchlan) */
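/*
 * On PCH and newer, the PHY rather than the MAC wakes the system, so
 * the MAC's receive address registers, multicast table and Rx control
 * settings are mirrored into the PHY's wakeup register page before
 * WUC/WUFC are armed.
 */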
   15306 static int
   15307 wm_enable_phy_wakeup(struct wm_softc *sc)
   15308 {
   15309 	device_t dev = sc->sc_dev;
   15310 	uint32_t mreg, moff;
   15311 	uint16_t wuce, wuc, wufc, preg;
   15312 	int i, rv;
   15313 
   15314 	KASSERT(sc->sc_type >= WM_T_PCH);
   15315 
   15316 	/* Copy MAC RARs to PHY RARs */
   15317 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15318 
   15319 	/* Activate PHY wakeup */
   15320 	rv = sc->phy.acquire(sc);
   15321 	if (rv != 0) {
   15322 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15323 		    __func__);
   15324 		return rv;
   15325 	}
   15326 
   15327 	/*
   15328 	 * Enable access to PHY wakeup registers.
   15329 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15330 	 */
   15331 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15332 	if (rv != 0) {
   15333 		device_printf(dev,
   15334 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15335 		goto release;
   15336 	}
   15337 
   15338 	/* Copy MAC MTA to PHY MTA */
   15339 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15340 		uint16_t lo, hi;
   15341 
   15342 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15343 		lo = (uint16_t)(mreg & 0xffff);
   15344 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15345 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15346 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15347 	}
   15348 
   15349 	/* Configure PHY Rx Control register */
   15350 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15351 	mreg = CSR_READ(sc, WMREG_RCTL);
   15352 	if (mreg & RCTL_UPE)
   15353 		preg |= BM_RCTL_UPE;
   15354 	if (mreg & RCTL_MPE)
   15355 		preg |= BM_RCTL_MPE;
   15356 	preg &= ~(BM_RCTL_MO_MASK);
   15357 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15358 	if (moff != 0)
   15359 		preg |= moff << BM_RCTL_MO_SHIFT;
   15360 	if (mreg & RCTL_BAM)
   15361 		preg |= BM_RCTL_BAM;
   15362 	if (mreg & RCTL_PMCF)
   15363 		preg |= BM_RCTL_PMCF;
   15364 	mreg = CSR_READ(sc, WMREG_CTRL);
   15365 	if (mreg & CTRL_RFCE)
   15366 		preg |= BM_RCTL_RFCE;
   15367 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15368 
   15369 	wuc = WUC_APME | WUC_PME_EN;
   15370 	wufc = WUFC_MAG;
   15371 	/* Enable PHY wakeup in MAC register */
   15372 	CSR_WRITE(sc, WMREG_WUC,
   15373 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15374 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15375 
   15376 	/* Configure and enable PHY wakeup in PHY registers */
   15377 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15378 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15379 
   15380 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15381 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15382 
   15383 release:
   15384 	sc->phy.release(sc);
   15385 
    15386 	return rv;
   15387 }
   15388 
   15389 /* Power down workaround on D3 */
   15390 static void
   15391 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15392 {
   15393 	uint32_t reg;
   15394 	uint16_t phyreg;
   15395 	int i;
   15396 
   15397 	for (i = 0; i < 2; i++) {
   15398 		/* Disable link */
   15399 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15400 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15401 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15402 
   15403 		/*
   15404 		 * Call gig speed drop workaround on Gig disable before
   15405 		 * accessing any PHY registers
   15406 		 */
   15407 		if (sc->sc_type == WM_T_ICH8)
   15408 			wm_gig_downshift_workaround_ich8lan(sc);
   15409 
   15410 		/* Write VR power-down enable */
   15411 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15412 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15413 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15414 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15415 
   15416 		/* Read it back and test */
   15417 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15418 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15419 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15420 			break;
   15421 
   15422 		/* Issue PHY reset and repeat at most one more time */
   15423 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15424 	}
   15425 }
   15426 
   15427 /*
   15428  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15429  *  @sc: pointer to the HW structure
   15430  *
   15431  *  During S0 to Sx transition, it is possible the link remains at gig
   15432  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15433  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15434  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15435  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15436  *  needs to be written.
    15437  *  Parts that support (and are linked to a partner which supports) EEE
    15438  *  in 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15439  *  than 10Mbps w/o EEE.
   15440  */
   15441 static void
   15442 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15443 {
   15444 	device_t dev = sc->sc_dev;
   15445 	struct ethercom *ec = &sc->sc_ethercom;
   15446 	uint32_t phy_ctrl;
   15447 	int rv;
   15448 
   15449 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15450 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15451 
   15452 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15453 
   15454 	if (sc->sc_phytype == WMPHY_I217) {
   15455 		uint16_t devid = sc->sc_pcidevid;
   15456 
   15457 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15458 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15459 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15460 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15461 		    (sc->sc_type >= WM_T_PCH_SPT))
   15462 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15463 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15464 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15465 
   15466 		if (sc->phy.acquire(sc) != 0)
   15467 			goto out;
   15468 
   15469 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15470 			uint16_t eee_advert;
   15471 
   15472 			rv = wm_read_emi_reg_locked(dev,
   15473 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15474 			if (rv)
   15475 				goto release;
   15476 
   15477 			/*
   15478 			 * Disable LPLU if both link partners support 100BaseT
   15479 			 * EEE and 100Full is advertised on both ends of the
   15480 			 * link, and enable Auto Enable LPI since there will
   15481 			 * be no driver to enable LPI while in Sx.
   15482 			 */
   15483 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15484 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15485 				uint16_t anar, phy_reg;
   15486 
   15487 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15488 				    &anar);
   15489 				if (anar & ANAR_TX_FD) {
   15490 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15491 					    PHY_CTRL_NOND0A_LPLU);
   15492 
   15493 					/* Set Auto Enable LPI after link up */
   15494 					sc->phy.readreg_locked(dev, 2,
   15495 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15496 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15497 					sc->phy.writereg_locked(dev, 2,
   15498 					    I217_LPI_GPIO_CTRL, phy_reg);
   15499 				}
   15500 			}
   15501 		}
   15502 
   15503 		/*
   15504 		 * For i217 Intel Rapid Start Technology support,
   15505 		 * when the system is going into Sx and no manageability engine
   15506 		 * is present, the driver must configure proxy to reset only on
   15507 		 * power good.	LPI (Low Power Idle) state must also reset only
   15508 		 * on power good, as well as the MTA (Multicast table array).
   15509 		 * The SMBus release must also be disabled on LCD reset.
   15510 		 */
   15511 
   15512 		/*
   15513 		 * Enable MTA to reset for Intel Rapid Start Technology
   15514 		 * Support
   15515 		 */
   15516 
   15517 release:
   15518 		sc->phy.release(sc);
   15519 	}
   15520 out:
   15521 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15522 
   15523 	if (sc->sc_type == WM_T_ICH8)
   15524 		wm_gig_downshift_workaround_ich8lan(sc);
   15525 
   15526 	if (sc->sc_type >= WM_T_PCH) {
   15527 		wm_oem_bits_config_ich8lan(sc, false);
   15528 
   15529 		/* Reset PHY to activate OEM bits on 82577/8 */
   15530 		if (sc->sc_type == WM_T_PCH)
   15531 			wm_reset_phy(sc);
   15532 
   15533 		if (sc->phy.acquire(sc) != 0)
   15534 			return;
   15535 		wm_write_smbus_addr(sc);
   15536 		sc->phy.release(sc);
   15537 	}
   15538 }
   15539 
   15540 /*
   15541  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15542  *  @sc: pointer to the HW structure
   15543  *
   15544  *  During Sx to S0 transitions on non-managed devices or managed devices
   15545  *  on which PHY resets are not blocked, if the PHY registers cannot be
    15546  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   15547  *  the PHY.
   15548  *  On i217, setup Intel Rapid Start Technology.
   15549  */
   15550 static int
   15551 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15552 {
   15553 	device_t dev = sc->sc_dev;
   15554 	int rv;
   15555 
   15556 	if (sc->sc_type < WM_T_PCH2)
   15557 		return 0;
   15558 
   15559 	rv = wm_init_phy_workarounds_pchlan(sc);
   15560 	if (rv != 0)
   15561 		return -1;
   15562 
   15563 	/* For i217 Intel Rapid Start Technology support when the system
   15564 	 * is transitioning from Sx and no manageability engine is present
   15565 	 * configure SMBus to restore on reset, disable proxy, and enable
   15566 	 * the reset on MTA (Multicast table array).
   15567 	 */
   15568 	if (sc->sc_phytype == WMPHY_I217) {
   15569 		uint16_t phy_reg;
   15570 
   15571 		if (sc->phy.acquire(sc) != 0)
   15572 			return -1;
   15573 
   15574 		/* Clear Auto Enable LPI after link up */
   15575 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15576 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15577 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15578 
   15579 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15580 			/* Restore clear on SMB if no manageability engine
   15581 			 * is present
   15582 			 */
   15583 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15584 			    &phy_reg);
   15585 			if (rv != 0)
   15586 				goto release;
   15587 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15588 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15589 
   15590 			/* Disable Proxy */
   15591 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15592 		}
   15593 		/* Enable reset on MTA */
    15594 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15595 		if (rv != 0)
   15596 			goto release;
   15597 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15598 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15599 
   15600 release:
   15601 		sc->phy.release(sc);
   15602 		return rv;
   15603 	}
   15604 
   15605 	return 0;
   15606 }
   15607 
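/*
 * Arm the device for wakeup: apply the ICH/PCH suspend workarounds,
 * program WUC/WUFC (through the PHY on PCH and newer), then set or
 * clear PME_EN in the PCI power management control/status register.
 */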
   15608 static void
   15609 wm_enable_wakeup(struct wm_softc *sc)
   15610 {
   15611 	uint32_t reg, pmreg;
   15612 	pcireg_t pmode;
   15613 	int rv = 0;
   15614 
   15615 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15616 		device_xname(sc->sc_dev), __func__));
   15617 
   15618 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15619 	    &pmreg, NULL) == 0)
   15620 		return;
   15621 
   15622 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15623 		goto pme;
   15624 
   15625 	/* Advertise the wakeup capability */
   15626 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15627 	    | CTRL_SWDPIN(3));
   15628 
   15629 	/* Keep the laser running on fiber adapters */
   15630 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15631 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15632 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15633 		reg |= CTRL_EXT_SWDPIN(3);
   15634 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15635 	}
   15636 
   15637 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15638 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15639 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15640 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15641 		wm_suspend_workarounds_ich8lan(sc);
   15642 
   15643 #if 0	/* For the multicast packet */
   15644 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15645 	reg |= WUFC_MC;
   15646 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15647 #endif
   15648 
   15649 	if (sc->sc_type >= WM_T_PCH) {
   15650 		rv = wm_enable_phy_wakeup(sc);
   15651 		if (rv != 0)
   15652 			goto pme;
   15653 	} else {
   15654 		/* Enable wakeup by the MAC */
   15655 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15656 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15657 	}
   15658 
   15659 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15660 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15661 		|| (sc->sc_type == WM_T_PCH2))
   15662 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15663 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15664 
   15665 pme:
   15666 	/* Request PME */
   15667 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15668 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15669 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15670 		/* For WOL */
   15671 		pmode |= PCI_PMCSR_PME_EN;
   15672 	} else {
   15673 		/* Disable WOL */
   15674 		pmode &= ~PCI_PMCSR_PME_EN;
   15675 	}
   15676 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15677 }
   15678 
   15679 /* Disable ASPM L0s and/or L1 for workaround */
   15680 static void
   15681 wm_disable_aspm(struct wm_softc *sc)
   15682 {
   15683 	pcireg_t reg, mask = 0;
    15684 	const char *str = "";
   15685 
   15686 	/*
    15687 	 * Only for PCIe devices which have the PCIe capability in the
    15688 	 * PCI config space.
   15689 	 */
   15690 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15691 		return;
   15692 
   15693 	switch (sc->sc_type) {
   15694 	case WM_T_82571:
   15695 	case WM_T_82572:
   15696 		/*
   15697 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15698 		 * State Power management L1 State (ASPM L1).
   15699 		 */
   15700 		mask = PCIE_LCSR_ASPM_L1;
   15701 		str = "L1 is";
   15702 		break;
   15703 	case WM_T_82573:
   15704 	case WM_T_82574:
   15705 	case WM_T_82583:
   15706 		/*
   15707 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15708 		 *
    15709 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15710 		 * some chipsets.  The documentation for the 82574 and 82583
    15711 		 * says that disabling L0s on those specific chipsets is
    15712 		 * sufficient, but we follow what the Intel em driver does.
   15713 		 *
   15714 		 * References:
   15715 		 * Errata 8 of the Specification Update of i82573.
   15716 		 * Errata 20 of the Specification Update of i82574.
   15717 		 * Errata 9 of the Specification Update of i82583.
   15718 		 */
   15719 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15720 		str = "L0s and L1 are";
   15721 		break;
   15722 	default:
   15723 		return;
   15724 	}
   15725 
   15726 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15727 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15728 	reg &= ~mask;
   15729 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15730 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15731 
   15732 	/* Print only in wm_attach() */
   15733 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15734 		aprint_verbose_dev(sc->sc_dev,
    15735 		    "ASPM %s disabled to work around the errata.\n", str);
   15736 }
   15737 
   15738 /* LPLU */
   15739 
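/*
 * Disable D0 Low Power Link Up.  The control lives in a PHY register
 * on older parts, in the PHPM register on 82580/I35x/I21x, in PHY_CTRL
 * on 82574/82583/ICH*, and in the HV OEM bits register on PCH parts.
 */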
   15740 static void
   15741 wm_lplu_d0_disable(struct wm_softc *sc)
   15742 {
   15743 	struct mii_data *mii = &sc->sc_mii;
   15744 	uint32_t reg;
   15745 	uint16_t phyval;
   15746 
   15747 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15748 		device_xname(sc->sc_dev), __func__));
   15749 
   15750 	if (sc->sc_phytype == WMPHY_IFE)
   15751 		return;
   15752 
   15753 	switch (sc->sc_type) {
   15754 	case WM_T_82571:
   15755 	case WM_T_82572:
   15756 	case WM_T_82573:
   15757 	case WM_T_82575:
   15758 	case WM_T_82576:
   15759 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15760 		phyval &= ~PMR_D0_LPLU;
   15761 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15762 		break;
   15763 	case WM_T_82580:
   15764 	case WM_T_I350:
   15765 	case WM_T_I210:
   15766 	case WM_T_I211:
   15767 		reg = CSR_READ(sc, WMREG_PHPM);
   15768 		reg &= ~PHPM_D0A_LPLU;
   15769 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15770 		break;
   15771 	case WM_T_82574:
   15772 	case WM_T_82583:
   15773 	case WM_T_ICH8:
   15774 	case WM_T_ICH9:
   15775 	case WM_T_ICH10:
   15776 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15777 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15778 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15779 		CSR_WRITE_FLUSH(sc);
   15780 		break;
   15781 	case WM_T_PCH:
   15782 	case WM_T_PCH2:
   15783 	case WM_T_PCH_LPT:
   15784 	case WM_T_PCH_SPT:
   15785 	case WM_T_PCH_CNP:
   15786 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15787 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15788 		if (wm_phy_resetisblocked(sc) == false)
   15789 			phyval |= HV_OEM_BITS_ANEGNOW;
   15790 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15791 		break;
   15792 	default:
   15793 		break;
   15794 	}
   15795 }
   15796 
   15797 /* EEE */
   15798 
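/*
 * Energy Efficient Ethernet on I350 class devices is controlled from
 * the MAC: IPCNFG selects which speeds may negotiate EEE, and EEER
 * enables LPI on the transmit and receive paths.
 */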
   15799 static int
   15800 wm_set_eee_i350(struct wm_softc *sc)
   15801 {
   15802 	struct ethercom *ec = &sc->sc_ethercom;
   15803 	uint32_t ipcnfg, eeer;
   15804 	uint32_t ipcnfg_mask
   15805 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15806 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15807 
   15808 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15809 
   15810 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15811 	eeer = CSR_READ(sc, WMREG_EEER);
   15812 
   15813 	/* Enable or disable per user setting */
   15814 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15815 		ipcnfg |= ipcnfg_mask;
   15816 		eeer |= eeer_mask;
   15817 	} else {
   15818 		ipcnfg &= ~ipcnfg_mask;
   15819 		eeer &= ~eeer_mask;
   15820 	}
   15821 
   15822 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15823 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15824 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15825 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15826 
   15827 	return 0;
   15828 }
   15829 
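/*
 * On 82579 and I217 class PHYs the EEE controls live in PHY/EMI
 * registers; enable LPI per speed only where both our advertisement
 * and the link partner's ability bits agree.
 */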
   15830 static int
   15831 wm_set_eee_pchlan(struct wm_softc *sc)
   15832 {
   15833 	device_t dev = sc->sc_dev;
   15834 	struct ethercom *ec = &sc->sc_ethercom;
   15835 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15836 	int rv = 0;
   15837 
   15838 	switch (sc->sc_phytype) {
   15839 	case WMPHY_82579:
   15840 		lpa = I82579_EEE_LP_ABILITY;
   15841 		pcs_status = I82579_EEE_PCS_STATUS;
   15842 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15843 		break;
   15844 	case WMPHY_I217:
   15845 		lpa = I217_EEE_LP_ABILITY;
   15846 		pcs_status = I217_EEE_PCS_STATUS;
   15847 		adv_addr = I217_EEE_ADVERTISEMENT;
   15848 		break;
   15849 	default:
   15850 		return 0;
   15851 	}
   15852 
   15853 	if (sc->phy.acquire(sc)) {
   15854 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15855 		return 0;
   15856 	}
   15857 
   15858 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15859 	if (rv != 0)
   15860 		goto release;
   15861 
   15862 	/* Clear bits that enable EEE in various speeds */
   15863 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15864 
   15865 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15866 		/* Save off link partner's EEE ability */
   15867 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15868 		if (rv != 0)
   15869 			goto release;
   15870 
   15871 		/* Read EEE advertisement */
   15872 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15873 			goto release;
   15874 
   15875 		/*
   15876 		 * Enable EEE only for speeds in which the link partner is
   15877 		 * EEE capable and for which we advertise EEE.
   15878 		 */
   15879 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15880 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15881 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15882 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15883 			if ((data & ANLPAR_TX_FD) != 0)
   15884 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15885 			else {
   15886 				/*
   15887 				 * EEE is not supported in 100Half, so ignore
   15888 				 * partner's EEE in 100 ability if full-duplex
   15889 				 * is not advertised.
   15890 				 */
   15891 				sc->eee_lp_ability
   15892 				    &= ~AN_EEEADVERT_100_TX;
   15893 			}
   15894 		}
   15895 	}
   15896 
   15897 	if (sc->sc_phytype == WMPHY_82579) {
   15898 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15899 		if (rv != 0)
   15900 			goto release;
   15901 
   15902 		data &= ~I82579_LPI_PLL_SHUT_100;
   15903 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15904 	}
   15905 
   15906 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15907 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15908 		goto release;
   15909 
   15910 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15911 release:
   15912 	sc->phy.release(sc);
   15913 
   15914 	return rv;
   15915 }
   15916 
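/*
 * Configure EEE based on the MAC type; I354 (external PHY) is not
 * handled yet.
 */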
   15917 static int
   15918 wm_set_eee(struct wm_softc *sc)
   15919 {
   15920 	struct ethercom *ec = &sc->sc_ethercom;
   15921 
   15922 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15923 		return 0;
   15924 
   15925 	if (sc->sc_type == WM_T_I354) {
   15926 		/* I354 uses an external PHY */
   15927 		return 0; /* not yet */
   15928 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15929 		return wm_set_eee_i350(sc);
   15930 	else if (sc->sc_type >= WM_T_PCH2)
   15931 		return wm_set_eee_pchlan(sc);
   15932 
   15933 	return 0;
   15934 }
   15935 
   15936 /*
   15937  * Workarounds (mainly PHY related).
   15938  * Basically, PHY's workarounds are in the PHY drivers.
   15939  */
   15940 
   15941 /* Work-around for 82566 Kumeran PCS lock loss */
   15942 static int
   15943 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15944 {
   15945 	struct mii_data *mii = &sc->sc_mii;
   15946 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15947 	int i, reg, rv;
   15948 	uint16_t phyreg;
   15949 
   15950 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15951 		device_xname(sc->sc_dev), __func__));
   15952 
   15953 	/* If the link is not up, do nothing */
   15954 	if ((status & STATUS_LU) == 0)
   15955 		return 0;
   15956 
   15957 	/* Nothing to do if the link is other than 1Gbps */
   15958 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15959 		return 0;
   15960 
   15961 	for (i = 0; i < 10; i++) {
   15962 		/* read twice */
   15963 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15964 		if (rv != 0)
   15965 			return rv;
   15966 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15967 		if (rv != 0)
   15968 			return rv;
   15969 
   15970 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15971 			goto out;	/* GOOD! */
   15972 
   15973 		/* Reset the PHY */
   15974 		wm_reset_phy(sc);
   15975 		delay(5*1000);
   15976 	}
   15977 
   15978 	/* Disable GigE link negotiation */
   15979 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15980 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15981 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15982 
   15983 	/*
   15984 	 * Call gig speed drop workaround on Gig disable before accessing
   15985 	 * any PHY registers.
   15986 	 */
   15987 	wm_gig_downshift_workaround_ich8lan(sc);
   15988 
   15989 out:
   15990 	return 0;
   15991 }
   15992 
   15993 /*
   15994  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15995  *  @sc: pointer to the HW structure
   15996  *
    15997  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15998  *  LPLU, Gig disable, MDIC PHY reset):
   15999  *    1) Set Kumeran Near-end loopback
   16000  *    2) Clear Kumeran Near-end loopback
   16001  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16002  */
   16003 static void
   16004 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16005 {
   16006 	uint16_t kmreg;
   16007 
   16008 	/* Only for igp3 */
   16009 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16010 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16011 			return;
   16012 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16013 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16014 			return;
   16015 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16016 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16017 	}
   16018 }
   16019 
   16020 /*
   16021  * Workaround for pch's PHYs
   16022  * XXX should be moved to new PHY driver?
   16023  */
   16024 static int
   16025 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16026 {
   16027 	device_t dev = sc->sc_dev;
   16028 	struct mii_data *mii = &sc->sc_mii;
   16029 	struct mii_softc *child;
   16030 	uint16_t phy_data, phyrev = 0;
   16031 	int phytype = sc->sc_phytype;
   16032 	int rv;
   16033 
   16034 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16035 		device_xname(dev), __func__));
   16036 	KASSERT(sc->sc_type == WM_T_PCH);
   16037 
   16038 	/* Set MDIO slow mode before any other MDIO access */
   16039 	if (phytype == WMPHY_82577)
   16040 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16041 			return rv;
   16042 
   16043 	child = LIST_FIRST(&mii->mii_phys);
   16044 	if (child != NULL)
   16045 		phyrev = child->mii_mpd_rev;
   16046 
    16047 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16048 	if ((child != NULL) &&
   16049 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16050 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16051 		/* Disable generation of early preamble (0x4431) */
   16052 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16053 		    &phy_data);
   16054 		if (rv != 0)
   16055 			return rv;
   16056 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16057 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16058 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16059 		    phy_data);
   16060 		if (rv != 0)
   16061 			return rv;
   16062 
   16063 		/* Preamble tuning for SSC */
   16064 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16065 		if (rv != 0)
   16066 			return rv;
   16067 	}
   16068 
   16069 	/* 82578 */
   16070 	if (phytype == WMPHY_82578) {
   16071 		/*
   16072 		 * Return registers to default by doing a soft reset then
   16073 		 * writing 0x3140 to the control register
   16074 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16075 		 */
   16076 		if ((child != NULL) && (phyrev < 2)) {
   16077 			PHY_RESET(child);
   16078 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16079 			if (rv != 0)
   16080 				return rv;
   16081 		}
   16082 	}
   16083 
   16084 	/* Select page 0 */
   16085 	if ((rv = sc->phy.acquire(sc)) != 0)
   16086 		return rv;
   16087 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16088 	sc->phy.release(sc);
   16089 	if (rv != 0)
   16090 		return rv;
   16091 
   16092 	/*
   16093 	 * Configure the K1 Si workaround during phy reset assuming there is
   16094 	 * link so that it disables K1 if link is in 1Gbps.
   16095 	 */
   16096 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16097 		return rv;
   16098 
   16099 	/* Workaround for link disconnects on a busy hub in half duplex */
   16100 	rv = sc->phy.acquire(sc);
   16101 	if (rv)
   16102 		return rv;
   16103 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16104 	if (rv)
   16105 		goto release;
   16106 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16107 	    phy_data & 0x00ff);
   16108 	if (rv)
   16109 		goto release;
   16110 
   16111 	/* Set MSE higher to enable link to stay up when noise is high */
   16112 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16113 release:
   16114 	sc->phy.release(sc);
   16115 
   16116 	return rv;
   16117 }
   16118 
   16119 /*
   16120  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16121  *  @sc:   pointer to the HW structure
   16122  */
   16123 static void
   16124 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16125 {
   16126 
   16127 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16128 		device_xname(sc->sc_dev), __func__));
   16129 
   16130 	if (sc->phy.acquire(sc) != 0)
   16131 		return;
   16132 
   16133 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16134 
   16135 	sc->phy.release(sc);
   16136 }
   16137 
   16138 static void
   16139 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16140 {
   16141 	device_t dev = sc->sc_dev;
   16142 	uint32_t mac_reg;
   16143 	uint16_t i, wuce;
   16144 	int count;
   16145 
   16146 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16147 		device_xname(dev), __func__));
   16148 
   16149 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16150 		return;
   16151 
   16152 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16153 	count = wm_rar_count(sc);
   16154 	for (i = 0; i < count; i++) {
   16155 		uint16_t lo, hi;
   16156 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16157 		lo = (uint16_t)(mac_reg & 0xffff);
   16158 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16159 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16160 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16161 
   16162 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16163 		lo = (uint16_t)(mac_reg & 0xffff);
   16164 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16165 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16166 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16167 	}
   16168 
   16169 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16170 }
   16171 
   16172 /*
   16173  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16174  *  with 82579 PHY
   16175  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16176  */
   16177 static int
   16178 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16179 {
   16180 	device_t dev = sc->sc_dev;
   16181 	int rar_count;
   16182 	int rv;
   16183 	uint32_t mac_reg;
   16184 	uint16_t dft_ctrl, data;
   16185 	uint16_t i;
   16186 
   16187 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16188 		device_xname(dev), __func__));
   16189 
   16190 	if (sc->sc_type < WM_T_PCH2)
   16191 		return 0;
   16192 
   16193 	/* Acquire PHY semaphore */
   16194 	rv = sc->phy.acquire(sc);
   16195 	if (rv != 0)
   16196 		return rv;
   16197 
   16198 	/* Disable Rx path while enabling/disabling workaround */
   16199 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16200 	if (rv != 0)
   16201 		goto out;
   16202 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16203 	    dft_ctrl | (1 << 14));
   16204 	if (rv != 0)
   16205 		goto out;
   16206 
   16207 	if (enable) {
   16208 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16209 		 * SHRAL/H) and initial CRC values to the MAC
   16210 		 */
   16211 		rar_count = wm_rar_count(sc);
   16212 		for (i = 0; i < rar_count; i++) {
   16213 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16214 			uint32_t addr_high, addr_low;
   16215 
   16216 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16217 			if (!(addr_high & RAL_AV))
   16218 				continue;
   16219 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16220 			mac_addr[0] = (addr_low & 0xFF);
   16221 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16222 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16223 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16224 			mac_addr[4] = (addr_high & 0xFF);
   16225 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16226 
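         			/*
         			 * Seed the initial CRC value for this address
         			 * with the complement of the little-endian
         			 * CRC-32 of the 6-byte station address; this
         			 * matches what the FreeBSD and Linux drivers
         			 * program here.
         			 */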
   16227 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16228 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16229 		}
   16230 
   16231 		/* Write Rx addresses to the PHY */
   16232 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16233 	}
   16234 
   16235 	/*
   16236 	 * If enable ==
   16237 	 *	true: Enable jumbo frame workaround in the MAC.
   16238 	 *	false: Write MAC register values back to h/w defaults.
   16239 	 */
   16240 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16241 	if (enable) {
   16242 		mac_reg &= ~(1 << 14);
   16243 		mac_reg |= (7 << 15);
   16244 	} else
   16245 		mac_reg &= ~(0xf << 14);
   16246 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16247 
   16248 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16249 	if (enable) {
   16250 		mac_reg |= RCTL_SECRC;
   16251 		sc->sc_rctl |= RCTL_SECRC;
   16252 		sc->sc_flags |= WM_F_CRC_STRIP;
   16253 	} else {
   16254 		mac_reg &= ~RCTL_SECRC;
   16255 		sc->sc_rctl &= ~RCTL_SECRC;
   16256 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16257 	}
   16258 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16259 
   16260 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16261 	if (rv != 0)
   16262 		goto out;
   16263 	if (enable)
   16264 		data |= 1 << 0;
   16265 	else
   16266 		data &= ~(1 << 0);
   16267 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16268 	if (rv != 0)
   16269 		goto out;
   16270 
   16271 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16272 	if (rv != 0)
   16273 		goto out;
   16274 	/*
   16275 	 * XXX FreeBSD and Linux set the same value here whether the
   16276 	 * workaround is being enabled or disabled. Is that correct?
   16277 	 */
   16278 	data &= ~(0xf << 8);
   16279 	data |= (0xb << 8);
   16280 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16281 	if (rv != 0)
   16282 		goto out;
   16283 
   16284 	/*
   16285 	 * If enable ==
   16286 	 *	true: Enable jumbo frame workaround in the PHY.
   16287 	 *	false: Write PHY register values back to h/w defaults.
   16288 	 */
   16289 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16290 	if (rv != 0)
   16291 		goto out;
   16292 	data &= ~(0x7F << 5);
   16293 	if (enable)
   16294 		data |= (0x37 << 5);
   16295 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16296 	if (rv != 0)
   16297 		goto out;
   16298 
   16299 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16300 	if (rv != 0)
   16301 		goto out;
   16302 	if (enable)
   16303 		data &= ~(1 << 13);
   16304 	else
   16305 		data |= (1 << 13);
   16306 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16307 	if (rv != 0)
   16308 		goto out;
   16309 
   16310 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16311 	if (rv != 0)
   16312 		goto out;
   16313 	data &= ~(0x3FF << 2);
   16314 	if (enable)
   16315 		data |= (I82579_TX_PTR_GAP << 2);
   16316 	else
   16317 		data |= (0x8 << 2);
   16318 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16319 	if (rv != 0)
   16320 		goto out;
   16321 
   16322 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16323 	    enable ? 0xf100 : 0x7e00);
   16324 	if (rv != 0)
   16325 		goto out;
   16326 
   16327 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16328 	if (rv != 0)
   16329 		goto out;
   16330 	if (enable)
   16331 		data |= 1 << 10;
   16332 	else
   16333 		data &= ~(1 << 10);
   16334 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16335 	if (rv != 0)
   16336 		goto out;
   16337 
   16338 	/* Re-enable Rx path after enabling/disabling workaround */
   16339 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16340 	    dft_ctrl & ~(1 << 14));
   16341 
   16342 out:
   16343 	sc->phy.release(sc);
   16344 
   16345 	return rv;
   16346 }
   16347 
   16348 /*
   16349  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16350  *  wm_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
   16351  */
   16352 static int
   16353 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16354 {
   16355 	device_t dev = sc->sc_dev;
   16356 	int rv;
   16357 
   16358 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16359 		device_xname(dev), __func__));
   16360 	KASSERT(sc->sc_type == WM_T_PCH2);
   16361 
   16362 	/* Set MDIO slow mode before any other MDIO access */
   16363 	rv = wm_set_mdio_slow_mode_hv(sc);
   16364 	if (rv != 0)
   16365 		return rv;
   16366 
   16367 	rv = sc->phy.acquire(sc);
   16368 	if (rv != 0)
   16369 		return rv;
   16370 	/* Set MSE higher to enable link to stay up when noise is high */
   16371 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16372 	if (rv != 0)
   16373 		goto release;
   16374 	/* Drop link after 5 times MSE threshold was reached */
   16375 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16376 release:
   16377 	sc->phy.release(sc);
   16378 
   16379 	return rv;
   16380 }
   16381 
   16382 /**
   16383  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16384  *  @link: link up bool flag
   16385  *
   16386  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   16387  *  preventing further DMA write requests.  Work around the issue by disabling
   16388  *  the de-assertion of the clock request when in 1Gbps mode.
   16389  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16390  *  speeds in order to avoid Tx hangs.
   16391  **/
   16392 static int
   16393 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16394 {
   16395 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16396 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16397 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16398 	uint16_t phyreg;
   16399 
   16400 	if (link && (speed == STATUS_SPEED_1000)) {
   16401 		int rv = sc->phy.acquire(sc);
         		if (rv != 0)
         			return rv;
   16402 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16403 		    &phyreg);
   16404 		if (rv != 0)
   16405 			goto release;
   16406 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16407 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16408 		if (rv != 0)
   16409 			goto release;
   16410 		delay(20);
   16411 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16412 
   16413 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16414 		    &phyreg);
   16415 release:
   16416 		sc->phy.release(sc);
   16417 		return rv;
   16418 	}
   16419 
   16420 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16421 
   16422 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16423 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16424 	    || !link
   16425 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16426 		goto update_fextnvm6;
   16427 
   16428 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16429 
   16430 	/* Clear link status transmit timeout */
   16431 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16432 	if (speed == STATUS_SPEED_100) {
   16433 		/* Set inband Tx timeout to 5x10us for 100Half */
   16434 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16435 
   16436 		/* Do not extend the K1 entry latency for 100Half */
   16437 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16438 	} else {
   16439 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16440 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16441 
   16442 		/* Extend the K1 entry latency for 10 Mbps */
   16443 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16444 	}
   16445 
   16446 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16447 
   16448 update_fextnvm6:
   16449 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16450 	return 0;
   16451 }
   16452 
   16453 /*
   16454  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16455  *  @sc:   pointer to the HW structure
   16456  *  @link: link up bool flag
   16457  *
   16458  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   16459  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   16460  *  If link is down, the function will restore the default K1 setting located
   16461  *  in the NVM.
   16462  */
   16463 static int
   16464 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16465 {
   16466 	int k1_enable = sc->sc_nvm_k1_enabled;
   16467 
   16468 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16469 		device_xname(sc->sc_dev), __func__));
   16470 
   16471 	if (sc->phy.acquire(sc) != 0)
   16472 		return -1;
   16473 
   16474 	if (link) {
   16475 		k1_enable = 0;
   16476 
   16477 		/* Link stall fix for link up */
   16478 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16479 		    0x0100);
   16480 	} else {
   16481 		/* Link stall fix for link down */
   16482 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16483 		    0x4100);
   16484 	}
   16485 
   16486 	wm_configure_k1_ich8lan(sc, k1_enable);
   16487 	sc->phy.release(sc);
   16488 
   16489 	return 0;
   16490 }
   16491 
   16492 /*
   16493  *  wm_k1_workaround_lv - K1 Si workaround
   16494  *  @sc:   pointer to the HW structure
   16495  *
   16496  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
   16497  *  Disable K1 for 1000 and 100 speeds.
   16498  */
   16499 static int
   16500 wm_k1_workaround_lv(struct wm_softc *sc)
   16501 {
   16502 	uint32_t reg;
   16503 	uint16_t phyreg;
   16504 	int rv;
   16505 
   16506 	if (sc->sc_type != WM_T_PCH2)
   16507 		return 0;
   16508 
   16509 	/* Set K1 beacon duration based on 10Mbps speed */
   16510 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16511 	if (rv != 0)
   16512 		return rv;
   16513 
   16514 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16515 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16516 		if (phyreg &
   16517 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   16518 			/* LV 1G/100 packet drop issue workaround */
   16519 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16520 			    &phyreg);
   16521 			if (rv != 0)
   16522 				return rv;
   16523 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16524 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16525 			    phyreg);
   16526 			if (rv != 0)
   16527 				return rv;
   16528 		} else {
   16529 			/* For 10Mbps */
   16530 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16531 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16532 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16533 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16534 		}
   16535 	}
   16536 
   16537 	return 0;
   16538 }
   16539 
   16540 /*
   16541  *  wm_link_stall_workaround_hv - Si workaround
   16542  *  @sc: pointer to the HW structure
   16543  *
   16544  *  This function works around a Si bug where the link partner can get
   16545  *  a link up indication before the PHY does. If small packets are sent
   16546  *  by the link partner they can be placed in the packet buffer without
   16547  *  being properly accounted for by the PHY and will stall, preventing
   16548  *  further packets from being received.  The workaround is to clear the
   16549  *  packet buffer after the PHY detects link up.
   16550  */
   16551 static int
   16552 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16553 {
   16554 	uint16_t phyreg;
   16555 
   16556 	if (sc->sc_phytype != WMPHY_82578)
   16557 		return 0;
   16558 
   16559 	/* Do not apply workaround if PHY is in loopback (BMCR bit 14 set) */
   16560 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16561 	if ((phyreg & BMCR_LOOP) != 0)
   16562 		return 0;
   16563 
   16564 	/* Check if link is up and at 1Gbps */
   16565 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16566 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16567 	    | BM_CS_STATUS_SPEED_MASK;
   16568 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16569 		| BM_CS_STATUS_SPEED_1000))
   16570 		return 0;
   16571 
   16572 	delay(200 * 1000);	/* XXX too big */
   16573 
   16574 	/* Flush the packets in the FIFO buffer */
   16575 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16576 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16577 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16578 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16579 
   16580 	return 0;
   16581 }
   16582 
   16583 static int
   16584 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16585 {
   16586 	int rv;
   16587 	uint16_t reg;
   16588 
   16589 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16590 	if (rv != 0)
   16591 		return rv;
   16592 
   16593 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16594 	    reg | HV_KMRN_MDIO_SLOW);
   16595 }
   16596 
   16597 /*
   16598  *  wm_configure_k1_ich8lan - Configure K1 power state
   16599  *  @sc: pointer to the HW structure
   16600  *  @enable: K1 state to configure
   16601  *
   16602  *  Configure the K1 power state based on the provided parameter.
   16603  *  Assumes semaphore already acquired.
   16604  */
   16605 static void
   16606 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16607 {
   16608 	uint32_t ctrl, ctrl_ext, tmp;
   16609 	uint16_t kmreg;
   16610 	int rv;
   16611 
   16612 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16613 
   16614 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16615 	if (rv != 0)
   16616 		return;
   16617 
   16618 	if (k1_enable)
   16619 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16620 	else
   16621 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16622 
   16623 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16624 	if (rv != 0)
   16625 		return;
   16626 
   16627 	delay(20);
   16628 
   16629 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16630 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16631 
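         	/*
         	 * Briefly force the MAC speed configuration (the speed field
         	 * is cleared to its 10Mbps encoding with CTRL_FRCSPD set and
         	 * speed bypass enabled in CTRL_EXT), then restore the original
         	 * CTRL/CTRL_EXT values; this is assumed, as in other
         	 * e1000-derived drivers, to make the new K1 setting take
         	 * effect.
         	 */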
   16632 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16633 	tmp |= CTRL_FRCSPD;
   16634 
   16635 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16636 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16637 	CSR_WRITE_FLUSH(sc);
   16638 	delay(20);
   16639 
   16640 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16641 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16642 	CSR_WRITE_FLUSH(sc);
   16643 	delay(20);
   16646 }
   16647 
   16648 /* special case - for 82575 - need to do manual init ... */
   16649 static void
   16650 wm_reset_init_script_82575(struct wm_softc *sc)
   16651 {
   16652 	/*
   16653 	 * Remark: this is untested code - we have no board without EEPROM;
   16654 	 *  same setup as mentioned in the FreeBSD driver for the i82575.
   16655 	 */
   16656 
   16657 	/* SerDes configuration via SERDESCTRL */
   16658 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16659 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16660 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16661 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16662 
   16663 	/* CCM configuration via CCMCTL register */
   16664 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16665 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16666 
   16667 	/* PCIe lanes configuration */
   16668 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16669 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16670 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16671 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16672 
   16673 	/* PCIe PLL Configuration */
   16674 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16675 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16676 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16677 }
   16678 
   16679 static void
   16680 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16681 {
   16682 	uint32_t reg;
   16683 	uint16_t nvmword;
   16684 	int rv;
   16685 
   16686 	if (sc->sc_type != WM_T_82580)
   16687 		return;
   16688 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16689 		return;
   16690 
   16691 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16692 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16693 	if (rv != 0) {
   16694 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16695 		    __func__);
   16696 		return;
   16697 	}
   16698 
   16699 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16700 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16701 		reg |= MDICNFG_DEST;
   16702 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16703 		reg |= MDICNFG_COM_MDIO;
   16704 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16705 }
   16706 
   16707 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16708 
   16709 static bool
   16710 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16711 {
   16712 	uint32_t reg;
   16713 	uint16_t id1, id2;
   16714 	int i, rv;
   16715 
   16716 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16717 		device_xname(sc->sc_dev), __func__));
   16718 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16719 
   16720 	id1 = id2 = 0xffff;
   16721 	for (i = 0; i < 2; i++) {
   16722 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16723 		    &id1);
   16724 		if ((rv != 0) || MII_INVALIDID(id1))
   16725 			continue;
   16726 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16727 		    &id2);
   16728 		if ((rv != 0) || MII_INVALIDID(id2))
   16729 			continue;
   16730 		break;
   16731 	}
   16732 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16733 		goto out;
   16734 
   16735 	/*
   16736 	 * In case the PHY needs to be in MDIO slow mode,
   16737 	 * set slow mode and try to get the PHY ID again.
   16738 	 */
   16739 	rv = 0;
   16740 	if (sc->sc_type < WM_T_PCH_LPT) {
   16741 		sc->phy.release(sc);
   16742 		wm_set_mdio_slow_mode_hv(sc);
   16743 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16744 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16745 		sc->phy.acquire(sc);
   16746 	}
   16747 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16748 		device_printf(sc->sc_dev, "XXX return with false\n");
   16749 		return false;
   16750 	}
   16751 out:
   16752 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16753 		/* Only unforce SMBus if ME is not active */
   16754 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16755 			uint16_t phyreg;
   16756 
   16757 			/* Unforce SMBus mode in PHY */
   16758 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16759 			    CV_SMB_CTRL, &phyreg);
   16760 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16761 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16762 			    CV_SMB_CTRL, phyreg);
   16763 
   16764 			/* Unforce SMBus mode in MAC */
   16765 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16766 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16767 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16768 		}
   16769 	}
   16770 	return true;
   16771 }
   16772 
   16773 static void
   16774 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16775 {
   16776 	uint32_t reg;
   16777 	int i;
   16778 
   16779 	/* Set PHY Config Counter to 50msec */
   16780 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16781 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16782 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16783 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16784 
   16785 	/* Toggle LANPHYPC */
   16786 	reg = CSR_READ(sc, WMREG_CTRL);
   16787 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16788 	reg &= ~CTRL_LANPHYPC_VALUE;
   16789 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16790 	CSR_WRITE_FLUSH(sc);
   16791 	delay(1000);
   16792 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16793 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16794 	CSR_WRITE_FLUSH(sc);
   16795 
   16796 	if (sc->sc_type < WM_T_PCH_LPT)
   16797 		delay(50 * 1000);
   16798 	else {
   16799 		i = 20;
   16800 
   16801 		do {
   16802 			delay(5 * 1000);
   16803 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16804 		    && i--);
   16805 
   16806 		delay(30 * 1000);
   16807 	}
   16808 }
   16809 
   16810 static int
   16811 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16812 {
   16813 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16814 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16815 	uint32_t rxa;
   16816 	uint16_t scale = 0, lat_enc = 0;
   16817 	int32_t obff_hwm = 0;
   16818 	int64_t lat_ns, value;
   16819 
   16820 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16821 		device_xname(sc->sc_dev), __func__));
   16822 
   16823 	if (link) {
   16824 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16825 		uint32_t status;
   16826 		uint16_t speed;
   16827 		pcireg_t preg;
   16828 
   16829 		status = CSR_READ(sc, WMREG_STATUS);
   16830 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16831 		case STATUS_SPEED_10:
   16832 			speed = 10;
   16833 			break;
   16834 		case STATUS_SPEED_100:
   16835 			speed = 100;
   16836 			break;
   16837 		case STATUS_SPEED_1000:
   16838 			speed = 1000;
   16839 			break;
   16840 		default:
   16841 			device_printf(sc->sc_dev, "Unknown speed "
   16842 			    "(status = %08x)\n", status);
   16843 			return -1;
   16844 		}
   16845 
   16846 		/* Rx Packet Buffer Allocation size (KB) */
   16847 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16848 
   16849 		/*
   16850 		 * Determine the maximum latency tolerated by the device.
   16851 		 *
   16852 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16853 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16854 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16855 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16856 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16857 		 */
   16858 		lat_ns = ((int64_t)rxa * 1024 -
   16859 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16860 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16861 		if (lat_ns < 0)
   16862 			lat_ns = 0;
   16863 		else
   16864 			lat_ns /= speed;
   16865 		value = lat_ns;
   16866 
   16867 		while (value > LTRV_VALUE) {
   16868 			scale++;
   16869 			value = howmany(value, __BIT(5));
   16870 		}
   16871 		if (scale > LTRV_SCALE_MAX) {
   16872 			device_printf(sc->sc_dev,
   16873 			    "Invalid LTR latency scale %d\n", scale);
   16874 			return -1;
   16875 		}
   16876 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
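         		/*
         		 * Worked example with illustrative numbers: lat_ns =
         		 * 66000 leaves the loop above with scale = 2 and
         		 * value = 65, since howmany(66000, 32) = 2063 and
         		 * howmany(2063, 32) = 65, so lat_enc represents
         		 * 65 * 2^10 ns (about 66.6us), the smallest
         		 * representable latency not below the request.
         		 */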
   16877 
   16878 		/* Determine the maximum latency tolerated by the platform */
   16879 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16880 		    WM_PCI_LTR_CAP_LPT);
   16881 		max_snoop = preg & 0xffff;
   16882 		max_nosnoop = preg >> 16;
   16883 
   16884 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16885 
   16886 		if (lat_enc > max_ltr_enc) {
   16887 			lat_enc = max_ltr_enc;
   16888 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16889 			    * PCI_LTR_SCALETONS(
   16890 				    __SHIFTOUT(lat_enc,
   16891 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16892 		}
   16893 
   16894 		if (lat_ns) {
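         			/*
         			 * Convert the tolerated latency into the amount
         			 * of Rx buffer (in KB, the same unit as rxa)
         			 * that fills during that time at the current
         			 * link speed: ns * Mb/s * 1000 / 8 / 10^9 = KB.
         			 */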
   16895 			lat_ns *= speed * 1000;
   16896 			lat_ns /= 8;
   16897 			lat_ns /= 1000000000;
   16898 			obff_hwm = (int32_t)(rxa - lat_ns);
   16899 		}
   16900 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   16901 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   16902 			    " (rxa = %d, lat_ns = %d)\n",
   16903 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16904 			return -1;
   16905 		}
   16906 	}
   16907 	/* Snoop and No-Snoop latencies are the same */
   16908 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16909 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16910 
   16911 	/* Set OBFF high water mark */
   16912 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16913 	reg |= obff_hwm;
   16914 	CSR_WRITE(sc, WMREG_SVT, reg);
   16915 
   16916 	/* Enable OBFF */
   16917 	reg = CSR_READ(sc, WMREG_SVCR);
   16918 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16919 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16920 
   16921 	return 0;
   16922 }
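         
         /*
          * Illustrative sketch only (not compiled into the driver): the LTR
          * latency encoding performed in wm_platform_pm_pch_lpt() above,
          * written as a standalone helper.  The name wm_ltr_encode is
          * hypothetical.
          */
         #if 0
         static uint16_t
         wm_ltr_encode(uint64_t ns)
         {
         	uint16_t scale = 0;
         
         	/* Shrink into the 10-bit value field, scaling by 2^5 per step */
         	while (ns > LTRV_VALUE) {
         		scale++;
         		ns = howmany(ns, __BIT(5));
         	}
         	KASSERT(scale <= LTRV_SCALE_MAX);
         
         	return (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | ns);
         }
         #endif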
   16923 
   16924 /*
   16925  * I210 Errata 25 and I211 Errata 10
   16926  * Slow System Clock.
   16927  *
   16928  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
   16929  */
   16930 static int
   16931 wm_pll_workaround_i210(struct wm_softc *sc)
   16932 {
   16933 	uint32_t mdicnfg, wuc;
   16934 	uint32_t reg;
   16935 	pcireg_t pcireg;
   16936 	uint32_t pmreg;
   16937 	uint16_t nvmword, tmp_nvmword;
   16938 	uint16_t phyval;
   16939 	bool wa_done = false;
   16940 	int i, rv = 0;
   16941 
   16942 	/* Get Power Management cap offset */
   16943 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16944 	    &pmreg, NULL) == 0)
   16945 		return -1;
   16946 
   16947 	/* Save WUC and MDICNFG registers */
   16948 	wuc = CSR_READ(sc, WMREG_WUC);
   16949 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16950 
   16951 	reg = mdicnfg & ~MDICNFG_DEST;
   16952 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16953 
   16954 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   16955 		/*
   16956 		 * The default value of the Initialization Control Word 1
   16957 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   16958 		 */
   16959 		nvmword = INVM_DEFAULT_AL;
   16960 	}
   16961 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16962 
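         	/*
         	 * Retry loop: while the PHY PLL reports unconfigured, reset
         	 * the internal PHY, program the autoload word with the
         	 * workaround value (INVM_PLL_WO_VAL), bounce the function
         	 * through D3hot and back to D0, then restore the original
         	 * word and check the PLL state again.
         	 */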
   16963 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16964 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16965 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16966 
   16967 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16968 			rv = 0;
   16969 			break; /* OK */
   16970 		} else
   16971 			rv = -1;
   16972 
   16973 		wa_done = true;
   16974 		/* Directly reset the internal PHY */
   16975 		reg = CSR_READ(sc, WMREG_CTRL);
   16976 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16977 
   16978 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16979 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16980 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16981 
   16982 		CSR_WRITE(sc, WMREG_WUC, 0);
   16983 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16984 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16985 
   16986 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16987 		    pmreg + PCI_PMCSR);
   16988 		pcireg |= PCI_PMCSR_STATE_D3;
   16989 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16990 		    pmreg + PCI_PMCSR, pcireg);
   16991 		delay(1000);
   16992 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16993 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16994 		    pmreg + PCI_PMCSR, pcireg);
   16995 
   16996 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16997 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16998 
   16999 		/* Restore WUC register */
   17000 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17001 	}
   17002 
   17003 	/* Restore MDICNFG setting */
   17004 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17005 	if (wa_done)
   17006 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17007 	return rv;
   17008 }
   17009 
   17010 static void
   17011 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17012 {
   17013 	uint32_t reg;
   17014 
   17015 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17016 		device_xname(sc->sc_dev), __func__));
   17017 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17018 	    || (sc->sc_type == WM_T_PCH_CNP));
   17019 
   17020 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17021 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17022 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17023 
   17024 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17025 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17026 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17027 }
   17028 
   17029 /* Sysctl function */
   17030 #ifdef WM_DEBUG
   17031 static int
   17032 wm_sysctl_debug(SYSCTLFN_ARGS)
   17033 {
   17034 	struct sysctlnode node = *rnode;
   17035 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17036 	uint32_t dflags;
   17037 	int error;
   17038 
   17039 	dflags = sc->sc_debug;
   17040 	node.sysctl_data = &dflags;
   17041 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17042 
   17043 	if (error || newp == NULL)
   17044 		return error;
   17045 
   17046 	sc->sc_debug = dflags;
   17047 
   17048 	return 0;
   17049 }
   17050 #endif
   17051