/*	$NetBSD: if_wm.c,v 1.708 2021/10/13 08:12:36 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.708 2021/10/13 08:12:36 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
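
/*
 * Illustrative DPRINTF() usage (a sketch matching calls made later in
 * this file): the message is printed only when the named debug bit is
 * set in sc->sc_debug, which is adjustable at runtime through the
 * WM_DEBUG sysctl handler (wm_sysctl_debug()):
 *
 *	DPRINTF(sc, WM_DEBUG_INIT,
 *	    ("%s: %s called\n", device_xname(sc->sc_dev), __func__));
 */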

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
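
/*
 * A sketch of how the ring-index macros above are used: since
 * WM_NTXDESC(txq) is required to be a power of two, advancing an index,
 * e.g.
 *
 *	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
 *
 * wraps from WM_NTXDESC(txq) - 1 back to 0 with a mask instead of a
 * division or branch.
 */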

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
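
/*
 * Illustrative use of the attach macros above (a sketch; the real calls
 * are made while setting up the Tx/Rx queues): for queue number qnum of
 * device xname,
 *
 *	WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, qnum, xname);
 *
 * attaches a MISC-type event counter named "txqNNdefrag" (NN = qnum),
 * visible in "vmstat -e" output.
 */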

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
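
/*
 * Sketch of the Rx chaining helpers above as used by the Rx path: each
 * buffer of a multi-buffer packet is appended with
 *
 *	WM_RXCHAIN_LINK(rxq, m);
 *
 * and when the last buffer of the packet is seen, the assembled chain
 * is taken from rxq->rxq_head and the state is cleared with
 * WM_RXCHAIN_RESET(rxq).
 */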

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
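
/*
 * Illustrative use (a sketch): bumping the per-queue defrag counter
 * from the Tx path,
 *
 *	WM_Q_EVCNT_INCR(txq, defrag);
 *
 * expands to an atomic_load_relaxed()/atomic_store_relaxed() pair on
 * ev_count where __HAVE_ATOMIC64_LOADSTORE is available, to a plain
 * increment otherwise, and to nothing without WM_EVENT_COUNTERS.
 */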

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
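
/*
 * The usual access pattern built on these macros is read-modify-write
 * followed by a flushing read, e.g. (a sketch):
 *
 *	uint32_t reg = CSR_READ(sc, WMREG_CTRL);
 *	reg |= CTRL_SLU;
 *	CSR_WRITE(sc, WMREG_CTRL, reg);
 *	CSR_WRITE_FLUSH(sc);
 *
 * The STATUS read in CSR_WRITE_FLUSH() forces posted writes out to the
 * device.
 */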

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
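
/*
 * Sketch: the _LO/_HI helpers above split a descriptor ring's DMA
 * address for the 32-bit base address register pairs (register names
 * here are placeholders; the real writes happen in wm_init_tx_regs()
 * and wm_init_rx_regs()):
 *
 *	CSR_WRITE(sc, <TDBAH register>, WM_CDTXADDR_HI(txq, 0));
 *	CSR_WRITE(sc, <TDBAL register>, WM_CDTXADDR_LO(txq, 0));
 *
 * On systems with a 32-bit bus_addr_t, the _HI half is simply 0.
 */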

/*
 * Register read/write functions, other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);
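
/*
 * Sketch of the acquire/release pattern these get/put pairs implement;
 * the function pointers in struct wm_phyop/wm_nvmop are set at attach
 * time to the variant matching the chip:
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		... access PHY registers ...
 *		sc->phy.release(sc);
 *	}
 */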

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1340 	  "Intel i82801H LAN Controller",
   1341 	  WM_T_ICH8,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1343 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1344 	  WM_T_ICH8,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1346 	  "Intel i82801H (M) LAN Controller",
   1347 	  WM_T_ICH8,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1349 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1350 	  WM_T_ICH8,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1352 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1353 	  WM_T_ICH8,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1355 	  "82567V-3 LAN Controller",
   1356 	  WM_T_ICH8,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1358 	  "82801I (AMT) LAN Controller",
   1359 	  WM_T_ICH9,		WMP_F_COPPER },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1361 	  "82801I 10/100 LAN Controller",
   1362 	  WM_T_ICH9,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1364 	  "82801I (G) 10/100 LAN Controller",
   1365 	  WM_T_ICH9,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1367 	  "82801I (GT) 10/100 LAN Controller",
   1368 	  WM_T_ICH9,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1370 	  "82801I (C) LAN Controller",
   1371 	  WM_T_ICH9,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1373 	  "82801I mobile LAN Controller",
   1374 	  WM_T_ICH9,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1376 	  "82801I mobile (V) LAN Controller",
   1377 	  WM_T_ICH9,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1379 	  "82801I mobile (AMT) LAN Controller",
   1380 	  WM_T_ICH9,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1382 	  "82567LM-4 LAN Controller",
   1383 	  WM_T_ICH9,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1385 	  "82567LM-2 LAN Controller",
   1386 	  WM_T_ICH10,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1388 	  "82567LF-2 LAN Controller",
   1389 	  WM_T_ICH10,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1391 	  "82567LM-3 LAN Controller",
   1392 	  WM_T_ICH10,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1394 	  "82567LF-3 LAN Controller",
   1395 	  WM_T_ICH10,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1397 	  "82567V-2 LAN Controller",
   1398 	  WM_T_ICH10,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1400 	  "82567V-3? LAN Controller",
   1401 	  WM_T_ICH10,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1403 	  "HANKSVILLE LAN Controller",
   1404 	  WM_T_ICH10,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1406 	  "PCH LAN (82577LM) Controller",
   1407 	  WM_T_PCH,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1409 	  "PCH LAN (82577LC) Controller",
   1410 	  WM_T_PCH,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1412 	  "PCH LAN (82578DM) Controller",
   1413 	  WM_T_PCH,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1415 	  "PCH LAN (82578DC) Controller",
   1416 	  WM_T_PCH,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1418 	  "PCH2 LAN (82579LM) Controller",
   1419 	  WM_T_PCH2,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1421 	  "PCH2 LAN (82579V) Controller",
   1422 	  WM_T_PCH2,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1424 	  "82575EB dual-1000baseT Ethernet",
   1425 	  WM_T_82575,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1427 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1428 	  WM_T_82575,		WMP_F_SERDES },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1430 	  "82575GB quad-1000baseT Ethernet",
   1431 	  WM_T_82575,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1433 	  "82575GB quad-1000baseT Ethernet (PM)",
   1434 	  WM_T_82575,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1436 	  "82576 1000BaseT Ethernet",
   1437 	  WM_T_82576,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1439 	  "82576 1000BaseX Ethernet",
   1440 	  WM_T_82576,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1443 	  "82576 gigabit Ethernet (SERDES)",
   1444 	  WM_T_82576,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1447 	  "82576 quad-1000BaseT Ethernet",
   1448 	  WM_T_82576,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1451 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1452 	  WM_T_82576,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1455 	  "82576 gigabit Ethernet",
   1456 	  WM_T_82576,		WMP_F_COPPER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1459 	  "82576 gigabit Ethernet (SERDES)",
   1460 	  WM_T_82576,		WMP_F_SERDES },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1462 	  "82576 quad-gigabit Ethernet (SERDES)",
   1463 	  WM_T_82576,		WMP_F_SERDES },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1466 	  "82580 1000BaseT Ethernet",
   1467 	  WM_T_82580,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1469 	  "82580 1000BaseX Ethernet",
   1470 	  WM_T_82580,		WMP_F_FIBER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1473 	  "82580 1000BaseT Ethernet (SERDES)",
   1474 	  WM_T_82580,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1477 	  "82580 gigabit Ethernet (SGMII)",
   1478 	  WM_T_82580,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1480 	  "82580 dual-1000BaseT Ethernet",
   1481 	  WM_T_82580,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1484 	  "82580 quad-1000BaseX Ethernet",
   1485 	  WM_T_82580,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1488 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1489 	  WM_T_82580,		WMP_F_COPPER },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1492 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1493 	  WM_T_82580,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1496 	  "DH89XXCC 1000BASE-KX Ethernet",
   1497 	  WM_T_82580,		WMP_F_SERDES },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1500 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1501 	  WM_T_82580,		WMP_F_SERDES },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1504 	  "I350 Gigabit Network Connection",
   1505 	  WM_T_I350,		WMP_F_COPPER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1508 	  "I350 Gigabit Fiber Network Connection",
   1509 	  WM_T_I350,		WMP_F_FIBER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1512 	  "I350 Gigabit Backplane Connection",
   1513 	  WM_T_I350,		WMP_F_SERDES },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1516 	  "I350 Quad Port Gigabit Ethernet",
   1517 	  WM_T_I350,		WMP_F_SERDES },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1520 	  "I350 Gigabit Connection",
   1521 	  WM_T_I350,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1524 	  "I354 Gigabit Ethernet (KX)",
   1525 	  WM_T_I354,		WMP_F_SERDES },
   1526 
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1528 	  "I354 Gigabit Ethernet (SGMII)",
   1529 	  WM_T_I354,		WMP_F_COPPER },
   1530 
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1532 	  "I354 Gigabit Ethernet (2.5G)",
   1533 	  WM_T_I354,		WMP_F_COPPER },
   1534 
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1536 	  "I210-T1 Ethernet Server Adapter",
   1537 	  WM_T_I210,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1540 	  "I210 Ethernet (Copper OEM)",
   1541 	  WM_T_I210,		WMP_F_COPPER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1544 	  "I210 Ethernet (Copper IT)",
   1545 	  WM_T_I210,		WMP_F_COPPER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1548 	  "I210 Ethernet (Copper, FLASH less)",
   1549 	  WM_T_I210,		WMP_F_COPPER },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1552 	  "I210 Gigabit Ethernet (Fiber)",
   1553 	  WM_T_I210,		WMP_F_FIBER },
   1554 
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1556 	  "I210 Gigabit Ethernet (SERDES)",
   1557 	  WM_T_I210,		WMP_F_SERDES },
   1558 
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1560 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1561 	  WM_T_I210,		WMP_F_SERDES },
   1562 
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1564 	  "I210 Gigabit Ethernet (SGMII)",
   1565 	  WM_T_I210,		WMP_F_COPPER },
   1566 
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1568 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1569 	  WM_T_I210,		WMP_F_COPPER },
   1570 
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1572 	  "I211 Ethernet (COPPER)",
   1573 	  WM_T_I211,		WMP_F_COPPER },
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1575 	  "I217 V Ethernet Connection",
   1576 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1577 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1578 	  "I217 LM Ethernet Connection",
   1579 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1581 	  "I218 V Ethernet Connection",
   1582 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1584 	  "I218 V Ethernet Connection",
   1585 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1587 	  "I218 V Ethernet Connection",
   1588 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1589 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1590 	  "I218 LM Ethernet Connection",
   1591 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1593 	  "I218 LM Ethernet Connection",
   1594 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1596 	  "I218 LM Ethernet Connection",
   1597 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1599 	  "I219 LM Ethernet Connection",
   1600 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1602 	  "I219 LM (2) Ethernet Connection",
   1603 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1605 	  "I219 LM (3) Ethernet Connection",
   1606 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1608 	  "I219 LM (4) Ethernet Connection",
   1609 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1611 	  "I219 LM (5) Ethernet Connection",
   1612 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1614 	  "I219 LM (6) Ethernet Connection",
   1615 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1617 	  "I219 LM (7) Ethernet Connection",
   1618 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1620 	  "I219 LM (8) Ethernet Connection",
   1621 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1623 	  "I219 LM (9) Ethernet Connection",
   1624 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1626 	  "I219 LM (10) Ethernet Connection",
   1627 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1629 	  "I219 LM (11) Ethernet Connection",
   1630 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1632 	  "I219 LM (12) Ethernet Connection",
   1633 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1635 	  "I219 LM (13) Ethernet Connection",
   1636 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1638 	  "I219 LM (14) Ethernet Connection",
   1639 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1641 	  "I219 LM (15) Ethernet Connection",
   1642 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1644 	  "I219 LM (16) Ethernet Connection",
   1645 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1647 	  "I219 LM (17) Ethernet Connection",
   1648 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1650 	  "I219 LM (18) Ethernet Connection",
   1651 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1653 	  "I219 LM (19) Ethernet Connection",
   1654 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1656 	  "I219 V Ethernet Connection",
   1657 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1659 	  "I219 V (2) Ethernet Connection",
   1660 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1662 	  "I219 V (4) Ethernet Connection",
   1663 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1665 	  "I219 V (5) Ethernet Connection",
   1666 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1668 	  "I219 V (6) Ethernet Connection",
   1669 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1671 	  "I219 V (7) Ethernet Connection",
   1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1674 	  "I219 V (8) Ethernet Connection",
   1675 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1677 	  "I219 V (9) Ethernet Connection",
   1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1680 	  "I219 V (10) Ethernet Connection",
   1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1683 	  "I219 V (11) Ethernet Connection",
   1684 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1686 	  "I219 V (12) Ethernet Connection",
   1687 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1689 	  "I219 V (13) Ethernet Connection",
   1690 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1692 	  "I219 V (14) Ethernet Connection",
   1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1695 	  "I219 V (15) Ethernet Connection",
   1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1698 	  "I219 V (16) Ethernet Connection",
   1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1701 	  "I219 V (17) Ethernet Connection",
   1702 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1704 	  "I219 V (18) Ethernet Connection",
   1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1707 	  "I219 V (19) Ethernet Connection",
   1708 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1709 	{ 0,			0,
   1710 	  NULL,
   1711 	  0,			0 },
   1712 };
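
         /*
          * The all-zero entry above terminates the table: wm_lookup()
          * stops at the first entry whose wmp_name is NULL.
          */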
   1713 
   1714 /*
    1715  * Register read/write functions,
    1716  * other than CSR_{READ|WRITE}().
   1717  */
   1718 
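         /*
          * wm_io_read() and wm_io_write() use the chip's I/O-mapped indirect
          * access window: a write to offset 0 of the I/O BAR selects the
          * target register and offset 4 is the data window.  As the attach
          * code notes, I/O access is only used to work around chip bugs,
          * never for normal operation.
          */
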
   1719 #if 0 /* Not currently used */
   1720 static inline uint32_t
   1721 wm_io_read(struct wm_softc *sc, int reg)
   1722 {
   1723 
   1724 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1725 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1726 }
   1727 #endif
   1728 
   1729 static inline void
   1730 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1731 {
   1732 
   1733 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1734 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1735 }
   1736 
   1737 static inline void
   1738 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1739     uint32_t data)
   1740 {
   1741 	uint32_t regval;
   1742 	int i;
   1743 
   1744 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1745 
   1746 	CSR_WRITE(sc, reg, regval);
   1747 
   1748 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1749 		delay(5);
   1750 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1751 			break;
   1752 	}
   1753 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1754 		aprint_error("%s: WARNING:"
   1755 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1756 		    device_xname(sc->sc_dev), reg);
   1757 	}
   1758 }
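
         /*
          * Example (a sketch): registers with this address/data layout, such
          * as the 82575 SerDes control register, take the byte and its offset
          * in a single 32-bit write and set SCTL_CTL_READY once the byte has
          * been committed, so a caller looks like (WMREG_SCTL is merely
          * illustrative here):
          *
          *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, off, data);
          */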
   1759 
   1760 static inline void
   1761 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1762 {
   1763 	wa->wa_low = htole32(v & 0xffffffffU);
   1764 	if (sizeof(bus_addr_t) == 8)
   1765 		wa->wa_high = htole32((uint64_t) v >> 32);
   1766 	else
   1767 		wa->wa_high = 0;
   1768 }
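
         /*
          * Example (a sketch): wm_init_rxdesc() below programs a receive
          * descriptor's DMA address this way; on platforms with a 32-bit
          * bus_addr_t the high word is simply zero.
          *
          *	wm_set_dma_addr(&rxd->wrx_addr,
          *	    rxs->rxs_dmamap->dm_segs[0].ds_addr);
          */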
   1769 
   1770 /*
   1771  * Descriptor sync/init functions.
   1772  */
   1773 static inline void
   1774 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1775 {
   1776 	struct wm_softc *sc = txq->txq_sc;
   1777 
   1778 	/* If it will wrap around, sync to the end of the ring. */
   1779 	if ((start + num) > WM_NTXDESC(txq)) {
   1780 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1781 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1782 		    (WM_NTXDESC(txq) - start), ops);
   1783 		num -= (WM_NTXDESC(txq) - start);
   1784 		start = 0;
   1785 	}
   1786 
   1787 	/* Now sync whatever is left. */
   1788 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1789 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1790 }
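
         /*
          * Worked example: on a 256-descriptor ring, wm_cdtxsync(txq, 250,
          * 10, ops) syncs descriptors 250..255 in the first bus_dmamap_sync()
          * call and descriptors 0..3 in the second.
          */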
   1791 
   1792 static inline void
   1793 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1794 {
   1795 	struct wm_softc *sc = rxq->rxq_sc;
   1796 
   1797 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1798 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1799 }
   1800 
   1801 static inline void
   1802 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1803 {
   1804 	struct wm_softc *sc = rxq->rxq_sc;
   1805 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1806 	struct mbuf *m = rxs->rxs_mbuf;
   1807 
   1808 	/*
   1809 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1810 	 * so that the payload after the Ethernet header is aligned
   1811 	 * to a 4-byte boundary.
    1812 	 *
   1813 	 * XXX BRAINDAMAGE ALERT!
   1814 	 * The stupid chip uses the same size for every buffer, which
   1815 	 * is set in the Receive Control register.  We are using the 2K
   1816 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1817 	 * reason, we can't "scoot" packets longer than the standard
   1818 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1819 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1820 	 * the upper layer copy the headers.
   1821 	 */
   1822 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1823 
   1824 	if (sc->sc_type == WM_T_82574) {
   1825 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1826 		rxd->erx_data.erxd_addr =
   1827 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1828 		rxd->erx_data.erxd_dd = 0;
   1829 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1830 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1831 
   1832 		rxd->nqrx_data.nrxd_paddr =
   1833 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1834 		/* Currently, split header is not supported. */
   1835 		rxd->nqrx_data.nrxd_haddr = 0;
   1836 	} else {
   1837 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1838 
   1839 		wm_set_dma_addr(&rxd->wrx_addr,
   1840 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1841 		rxd->wrx_len = 0;
   1842 		rxd->wrx_cksum = 0;
   1843 		rxd->wrx_status = 0;
   1844 		rxd->wrx_errors = 0;
   1845 		rxd->wrx_special = 0;
   1846 	}
   1847 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1848 
   1849 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1850 }
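
         /*
          * Why the 2-byte "align tweak" above works: the Ethernet header is
          * 14 bytes long, so 2 + 14 = 16 leaves the IP header that follows
          * on a 4-byte boundary, which strict-alignment platforms require.
          */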
   1851 
   1852 /*
   1853  * Device driver interface functions and commonly used functions.
   1854  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1855  */
   1856 
   1857 /* Lookup supported device table */
   1858 static const struct wm_product *
   1859 wm_lookup(const struct pci_attach_args *pa)
   1860 {
   1861 	const struct wm_product *wmp;
   1862 
   1863 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1864 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1865 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1866 			return wmp;
   1867 	}
   1868 	return NULL;
   1869 }
   1870 
   1871 /* The match function (ca_match) */
   1872 static int
   1873 wm_match(device_t parent, cfdata_t cf, void *aux)
   1874 {
   1875 	struct pci_attach_args *pa = aux;
   1876 
   1877 	if (wm_lookup(pa) != NULL)
   1878 		return 1;
   1879 
   1880 	return 0;
   1881 }
   1882 
   1883 /* The attach function (ca_attach) */
   1884 static void
   1885 wm_attach(device_t parent, device_t self, void *aux)
   1886 {
   1887 	struct wm_softc *sc = device_private(self);
   1888 	struct pci_attach_args *pa = aux;
   1889 	prop_dictionary_t dict;
   1890 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1891 	pci_chipset_tag_t pc = pa->pa_pc;
   1892 	int counts[PCI_INTR_TYPE_SIZE];
   1893 	pci_intr_type_t max_type;
   1894 	const char *eetype, *xname;
   1895 	bus_space_tag_t memt;
   1896 	bus_space_handle_t memh;
   1897 	bus_size_t memsize;
   1898 	int memh_valid;
   1899 	int i, error;
   1900 	const struct wm_product *wmp;
   1901 	prop_data_t ea;
   1902 	prop_number_t pn;
   1903 	uint8_t enaddr[ETHER_ADDR_LEN];
   1904 	char buf[256];
   1905 	char wqname[MAXCOMLEN];
   1906 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1907 	pcireg_t preg, memtype;
   1908 	uint16_t eeprom_data, apme_mask;
   1909 	bool force_clear_smbi;
   1910 	uint32_t link_mode;
   1911 	uint32_t reg;
   1912 
   1913 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1914 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1915 #endif
   1916 	sc->sc_dev = self;
   1917 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1918 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1919 	sc->sc_core_stopping = false;
   1920 
   1921 	wmp = wm_lookup(pa);
   1922 #ifdef DIAGNOSTIC
   1923 	if (wmp == NULL) {
   1924 		printf("\n");
   1925 		panic("wm_attach: impossible");
   1926 	}
   1927 #endif
   1928 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1929 
   1930 	sc->sc_pc = pa->pa_pc;
   1931 	sc->sc_pcitag = pa->pa_tag;
   1932 
   1933 	if (pci_dma64_available(pa))
   1934 		sc->sc_dmat = pa->pa_dmat64;
   1935 	else
   1936 		sc->sc_dmat = pa->pa_dmat;
   1937 
   1938 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1939 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1940 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1941 
   1942 	sc->sc_type = wmp->wmp_type;
   1943 
   1944 	/* Set default function pointers */
   1945 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1946 	sc->phy.release = sc->nvm.release = wm_put_null;
   1947 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1948 
   1949 	if (sc->sc_type < WM_T_82543) {
   1950 		if (sc->sc_rev < 2) {
   1951 			aprint_error_dev(sc->sc_dev,
   1952 			    "i82542 must be at least rev. 2\n");
   1953 			return;
   1954 		}
   1955 		if (sc->sc_rev < 3)
   1956 			sc->sc_type = WM_T_82542_2_0;
   1957 	}
   1958 
   1959 	/*
   1960 	 * Disable MSI for Errata:
   1961 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1962 	 *
   1963 	 *  82544: Errata 25
   1964 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1965 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1966 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1967 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1968 	 *
   1969 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1970 	 *
   1971 	 *  82571 & 82572: Errata 63
   1972 	 */
   1973 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1974 	    || (sc->sc_type == WM_T_82572))
   1975 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1976 
   1977 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1978 	    || (sc->sc_type == WM_T_82580)
   1979 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1980 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1981 		sc->sc_flags |= WM_F_NEWQUEUE;
   1982 
   1983 	/* Set device properties (mactype) */
   1984 	dict = device_properties(sc->sc_dev);
   1985 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1986 
   1987 	/*
    1988 	 * Map the device.  All devices support memory-mapped access,
   1989 	 * and it is really required for normal operation.
   1990 	 */
   1991 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1992 	switch (memtype) {
   1993 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1994 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1995 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1996 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1997 		break;
   1998 	default:
   1999 		memh_valid = 0;
   2000 		break;
   2001 	}
   2002 
   2003 	if (memh_valid) {
   2004 		sc->sc_st = memt;
   2005 		sc->sc_sh = memh;
   2006 		sc->sc_ss = memsize;
   2007 	} else {
   2008 		aprint_error_dev(sc->sc_dev,
   2009 		    "unable to map device registers\n");
   2010 		return;
   2011 	}
   2012 
   2013 	/*
   2014 	 * In addition, i82544 and later support I/O mapped indirect
   2015 	 * register access.  It is not desirable (nor supported in
   2016 	 * this driver) to use it for normal operation, though it is
   2017 	 * required to work around bugs in some chip versions.
   2018 	 */
   2019 	if (sc->sc_type >= WM_T_82544) {
   2020 		/* First we have to find the I/O BAR. */
   2021 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2022 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2023 			if (memtype == PCI_MAPREG_TYPE_IO)
   2024 				break;
   2025 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2026 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2027 				i += 4;	/* skip high bits, too */
   2028 		}
   2029 		if (i < PCI_MAPREG_END) {
   2030 			/*
    2031 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2032 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2033 			 * That's not a problem, because those newer chips
    2034 			 * don't have this bug.
    2035 			 *
    2036 			 * The i8254x apparently doesn't respond when the
    2037 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2038 			 * been configured.
   2039 			 */
   2040 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2041 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2042 				aprint_error_dev(sc->sc_dev,
   2043 				    "WARNING: I/O BAR at zero.\n");
   2044 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2045 					0, &sc->sc_iot, &sc->sc_ioh,
   2046 					NULL, &sc->sc_ios) == 0) {
   2047 				sc->sc_flags |= WM_F_IOH_VALID;
   2048 			} else
   2049 				aprint_error_dev(sc->sc_dev,
   2050 				    "WARNING: unable to map I/O space\n");
   2051 		}
   2052 
   2053 	}
   2054 
   2055 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2056 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2057 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2058 	if (sc->sc_type < WM_T_82542_2_1)
   2059 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2060 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2061 
   2062 	/* Power up chip */
   2063 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2064 	    && error != EOPNOTSUPP) {
   2065 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2066 		return;
   2067 	}
   2068 
   2069 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2070 	/*
    2071 	 * Don't use MSI-X if we can use only one queue, to save
    2072 	 * interrupt resources.
   2073 	 */
   2074 	if (sc->sc_nqueues > 1) {
   2075 		max_type = PCI_INTR_TYPE_MSIX;
   2076 		/*
    2077 		 * The 82583 has an MSI-X capability in the PCI configuration
    2078 		 * space but doesn't actually support it. At least, the
    2079 		 * documentation doesn't say anything about MSI-X.
   2080 		 */
   2081 		counts[PCI_INTR_TYPE_MSIX]
   2082 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2083 	} else {
   2084 		max_type = PCI_INTR_TYPE_MSI;
   2085 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2086 	}
   2087 
   2088 	/* Allocation settings */
   2089 	counts[PCI_INTR_TYPE_MSI] = 1;
   2090 	counts[PCI_INTR_TYPE_INTX] = 1;
   2091 	/* overridden by disable flags */
   2092 	if (wm_disable_msi != 0) {
   2093 		counts[PCI_INTR_TYPE_MSI] = 0;
   2094 		if (wm_disable_msix != 0) {
   2095 			max_type = PCI_INTR_TYPE_INTX;
   2096 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2097 		}
   2098 	} else if (wm_disable_msix != 0) {
   2099 		max_type = PCI_INTR_TYPE_MSI;
   2100 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2101 	}
   2102 
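         	/*
         	 * Allocate interrupts, falling back in steps: if MSI-X setup
         	 * fails, release the vectors and retry with MSI; if MSI setup
         	 * fails, retry once more with INTx.
         	 */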
   2103 alloc_retry:
   2104 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2105 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2106 		return;
   2107 	}
   2108 
   2109 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2110 		error = wm_setup_msix(sc);
   2111 		if (error) {
   2112 			pci_intr_release(pc, sc->sc_intrs,
   2113 			    counts[PCI_INTR_TYPE_MSIX]);
   2114 
   2115 			/* Setup for MSI: Disable MSI-X */
   2116 			max_type = PCI_INTR_TYPE_MSI;
   2117 			counts[PCI_INTR_TYPE_MSI] = 1;
   2118 			counts[PCI_INTR_TYPE_INTX] = 1;
   2119 			goto alloc_retry;
   2120 		}
   2121 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2122 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2123 		error = wm_setup_legacy(sc);
   2124 		if (error) {
   2125 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2126 			    counts[PCI_INTR_TYPE_MSI]);
   2127 
   2128 			/* The next try is for INTx: Disable MSI */
   2129 			max_type = PCI_INTR_TYPE_INTX;
   2130 			counts[PCI_INTR_TYPE_INTX] = 1;
   2131 			goto alloc_retry;
   2132 		}
   2133 	} else {
   2134 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2135 		error = wm_setup_legacy(sc);
   2136 		if (error) {
   2137 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2138 			    counts[PCI_INTR_TYPE_INTX]);
   2139 			return;
   2140 		}
   2141 	}
   2142 
   2143 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2144 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2145 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2146 	    WM_WORKQUEUE_FLAGS);
   2147 	if (error) {
   2148 		aprint_error_dev(sc->sc_dev,
   2149 		    "unable to create workqueue\n");
   2150 		goto out;
   2151 	}
   2152 
   2153 	/*
   2154 	 * Check the function ID (unit number of the chip).
   2155 	 */
   2156 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2157 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2158 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2159 	    || (sc->sc_type == WM_T_82580)
   2160 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2161 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2162 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2163 	else
   2164 		sc->sc_funcid = 0;
   2165 
   2166 	/*
   2167 	 * Determine a few things about the bus we're connected to.
   2168 	 */
   2169 	if (sc->sc_type < WM_T_82543) {
   2170 		/* We don't really know the bus characteristics here. */
   2171 		sc->sc_bus_speed = 33;
   2172 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2173 		/*
    2174 		 * CSA (Communication Streaming Architecture) is about as fast
    2175 		 * as a 32-bit 66MHz PCI bus.
   2176 		 */
   2177 		sc->sc_flags |= WM_F_CSA;
   2178 		sc->sc_bus_speed = 66;
   2179 		aprint_verbose_dev(sc->sc_dev,
   2180 		    "Communication Streaming Architecture\n");
   2181 		if (sc->sc_type == WM_T_82547) {
   2182 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2183 			callout_setfunc(&sc->sc_txfifo_ch,
   2184 			    wm_82547_txfifo_stall, sc);
   2185 			aprint_verbose_dev(sc->sc_dev,
   2186 			    "using 82547 Tx FIFO stall work-around\n");
   2187 		}
   2188 	} else if (sc->sc_type >= WM_T_82571) {
   2189 		sc->sc_flags |= WM_F_PCIE;
   2190 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2191 		    && (sc->sc_type != WM_T_ICH10)
   2192 		    && (sc->sc_type != WM_T_PCH)
   2193 		    && (sc->sc_type != WM_T_PCH2)
   2194 		    && (sc->sc_type != WM_T_PCH_LPT)
   2195 		    && (sc->sc_type != WM_T_PCH_SPT)
   2196 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2197 			/* ICH* and PCH* have no PCIe capability registers */
   2198 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2199 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2200 				NULL) == 0)
   2201 				aprint_error_dev(sc->sc_dev,
   2202 				    "unable to find PCIe capability\n");
   2203 		}
   2204 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2205 	} else {
   2206 		reg = CSR_READ(sc, WMREG_STATUS);
   2207 		if (reg & STATUS_BUS64)
   2208 			sc->sc_flags |= WM_F_BUS64;
   2209 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2210 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2211 
   2212 			sc->sc_flags |= WM_F_PCIX;
   2213 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2214 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2215 				aprint_error_dev(sc->sc_dev,
   2216 				    "unable to find PCIX capability\n");
   2217 			else if (sc->sc_type != WM_T_82545_3 &&
   2218 				 sc->sc_type != WM_T_82546_3) {
   2219 				/*
    2220 				 * Work around a problem caused by the BIOS
    2221 				 * setting the max memory read byte count
    2222 				 * (encoded as 512 << n) incorrectly.
   2223 				 */
   2224 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2225 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2226 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2227 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2228 
   2229 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2230 				    PCIX_CMD_BYTECNT_SHIFT;
   2231 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2232 				    PCIX_STATUS_MAXB_SHIFT;
   2233 				if (bytecnt > maxb) {
   2234 					aprint_verbose_dev(sc->sc_dev,
   2235 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2236 					    512 << bytecnt, 512 << maxb);
   2237 					pcix_cmd = (pcix_cmd &
   2238 					    ~PCIX_CMD_BYTECNT_MASK) |
   2239 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2240 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2241 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2242 					    pcix_cmd);
   2243 				}
   2244 			}
   2245 		}
   2246 		/*
   2247 		 * The quad port adapter is special; it has a PCIX-PCIX
   2248 		 * bridge on the board, and can run the secondary bus at
   2249 		 * a higher speed.
   2250 		 */
   2251 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2252 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2253 								      : 66;
   2254 		} else if (sc->sc_flags & WM_F_PCIX) {
   2255 			switch (reg & STATUS_PCIXSPD_MASK) {
   2256 			case STATUS_PCIXSPD_50_66:
   2257 				sc->sc_bus_speed = 66;
   2258 				break;
   2259 			case STATUS_PCIXSPD_66_100:
   2260 				sc->sc_bus_speed = 100;
   2261 				break;
   2262 			case STATUS_PCIXSPD_100_133:
   2263 				sc->sc_bus_speed = 133;
   2264 				break;
   2265 			default:
   2266 				aprint_error_dev(sc->sc_dev,
   2267 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2268 				    reg & STATUS_PCIXSPD_MASK);
   2269 				sc->sc_bus_speed = 66;
   2270 				break;
   2271 			}
   2272 		} else
   2273 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2274 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2275 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2276 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2277 	}
   2278 
   2279 	/* clear interesting stat counters */
   2280 	CSR_READ(sc, WMREG_COLC);
   2281 	CSR_READ(sc, WMREG_RXERRC);
   2282 
   2283 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2284 	    || (sc->sc_type >= WM_T_ICH8))
   2285 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2286 	if (sc->sc_type >= WM_T_ICH8)
   2287 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2288 
   2289 	/* Set PHY, NVM mutex related stuff */
   2290 	switch (sc->sc_type) {
   2291 	case WM_T_82542_2_0:
   2292 	case WM_T_82542_2_1:
   2293 	case WM_T_82543:
   2294 	case WM_T_82544:
   2295 		/* Microwire */
   2296 		sc->nvm.read = wm_nvm_read_uwire;
   2297 		sc->sc_nvm_wordsize = 64;
   2298 		sc->sc_nvm_addrbits = 6;
   2299 		break;
   2300 	case WM_T_82540:
   2301 	case WM_T_82545:
   2302 	case WM_T_82545_3:
   2303 	case WM_T_82546:
   2304 	case WM_T_82546_3:
   2305 		/* Microwire */
   2306 		sc->nvm.read = wm_nvm_read_uwire;
   2307 		reg = CSR_READ(sc, WMREG_EECD);
   2308 		if (reg & EECD_EE_SIZE) {
   2309 			sc->sc_nvm_wordsize = 256;
   2310 			sc->sc_nvm_addrbits = 8;
   2311 		} else {
   2312 			sc->sc_nvm_wordsize = 64;
   2313 			sc->sc_nvm_addrbits = 6;
   2314 		}
   2315 		sc->sc_flags |= WM_F_LOCK_EECD;
   2316 		sc->nvm.acquire = wm_get_eecd;
   2317 		sc->nvm.release = wm_put_eecd;
   2318 		break;
   2319 	case WM_T_82541:
   2320 	case WM_T_82541_2:
   2321 	case WM_T_82547:
   2322 	case WM_T_82547_2:
   2323 		reg = CSR_READ(sc, WMREG_EECD);
   2324 		/*
    2325 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2326 		 * the 8254[17], so set flags and functions before calling it.
   2327 		 */
   2328 		sc->sc_flags |= WM_F_LOCK_EECD;
   2329 		sc->nvm.acquire = wm_get_eecd;
   2330 		sc->nvm.release = wm_put_eecd;
   2331 		if (reg & EECD_EE_TYPE) {
   2332 			/* SPI */
   2333 			sc->nvm.read = wm_nvm_read_spi;
   2334 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2335 			wm_nvm_set_addrbits_size_eecd(sc);
   2336 		} else {
   2337 			/* Microwire */
   2338 			sc->nvm.read = wm_nvm_read_uwire;
   2339 			if ((reg & EECD_EE_ABITS) != 0) {
   2340 				sc->sc_nvm_wordsize = 256;
   2341 				sc->sc_nvm_addrbits = 8;
   2342 			} else {
   2343 				sc->sc_nvm_wordsize = 64;
   2344 				sc->sc_nvm_addrbits = 6;
   2345 			}
   2346 		}
   2347 		break;
   2348 	case WM_T_82571:
   2349 	case WM_T_82572:
   2350 		/* SPI */
   2351 		sc->nvm.read = wm_nvm_read_eerd;
    2352 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2353 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2354 		wm_nvm_set_addrbits_size_eecd(sc);
   2355 		sc->phy.acquire = wm_get_swsm_semaphore;
   2356 		sc->phy.release = wm_put_swsm_semaphore;
   2357 		sc->nvm.acquire = wm_get_nvm_82571;
   2358 		sc->nvm.release = wm_put_nvm_82571;
   2359 		break;
   2360 	case WM_T_82573:
   2361 	case WM_T_82574:
   2362 	case WM_T_82583:
   2363 		sc->nvm.read = wm_nvm_read_eerd;
    2364 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2365 		if (sc->sc_type == WM_T_82573) {
   2366 			sc->phy.acquire = wm_get_swsm_semaphore;
   2367 			sc->phy.release = wm_put_swsm_semaphore;
   2368 			sc->nvm.acquire = wm_get_nvm_82571;
   2369 			sc->nvm.release = wm_put_nvm_82571;
   2370 		} else {
   2371 			/* Both PHY and NVM use the same semaphore. */
   2372 			sc->phy.acquire = sc->nvm.acquire
   2373 			    = wm_get_swfwhw_semaphore;
   2374 			sc->phy.release = sc->nvm.release
   2375 			    = wm_put_swfwhw_semaphore;
   2376 		}
   2377 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2378 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2379 			sc->sc_nvm_wordsize = 2048;
   2380 		} else {
   2381 			/* SPI */
   2382 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2383 			wm_nvm_set_addrbits_size_eecd(sc);
   2384 		}
   2385 		break;
   2386 	case WM_T_82575:
   2387 	case WM_T_82576:
   2388 	case WM_T_82580:
   2389 	case WM_T_I350:
   2390 	case WM_T_I354:
   2391 	case WM_T_80003:
   2392 		/* SPI */
   2393 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2394 		wm_nvm_set_addrbits_size_eecd(sc);
   2395 		if ((sc->sc_type == WM_T_80003)
   2396 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2397 			sc->nvm.read = wm_nvm_read_eerd;
   2398 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2399 		} else {
   2400 			sc->nvm.read = wm_nvm_read_spi;
   2401 			sc->sc_flags |= WM_F_LOCK_EECD;
   2402 		}
   2403 		sc->phy.acquire = wm_get_phy_82575;
   2404 		sc->phy.release = wm_put_phy_82575;
   2405 		sc->nvm.acquire = wm_get_nvm_80003;
   2406 		sc->nvm.release = wm_put_nvm_80003;
   2407 		break;
   2408 	case WM_T_ICH8:
   2409 	case WM_T_ICH9:
   2410 	case WM_T_ICH10:
   2411 	case WM_T_PCH:
   2412 	case WM_T_PCH2:
   2413 	case WM_T_PCH_LPT:
   2414 		sc->nvm.read = wm_nvm_read_ich8;
   2415 		/* FLASH */
   2416 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2417 		sc->sc_nvm_wordsize = 2048;
   2418 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2419 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2420 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2421 			aprint_error_dev(sc->sc_dev,
   2422 			    "can't map FLASH registers\n");
   2423 			goto out;
   2424 		}
   2425 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2426 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2427 		    ICH_FLASH_SECTOR_SIZE;
   2428 		sc->sc_ich8_flash_bank_size =
   2429 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2430 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2431 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2432 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
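         		/*
         		 * GFPREG holds the first and last flash sectors reserved
         		 * for the NVM; the computation above turns that sector
         		 * span into a byte offset for the base and a per-bank
         		 * size in 16-bit words (two banks).
         		 */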
   2433 		sc->sc_flashreg_offset = 0;
   2434 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2435 		sc->phy.release = wm_put_swflag_ich8lan;
   2436 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2437 		sc->nvm.release = wm_put_nvm_ich8lan;
   2438 		break;
   2439 	case WM_T_PCH_SPT:
   2440 	case WM_T_PCH_CNP:
   2441 		sc->nvm.read = wm_nvm_read_spt;
   2442 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2443 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2444 		sc->sc_flasht = sc->sc_st;
   2445 		sc->sc_flashh = sc->sc_sh;
   2446 		sc->sc_ich8_flash_base = 0;
   2447 		sc->sc_nvm_wordsize =
   2448 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2449 		    * NVM_SIZE_MULTIPLIER;
    2450 		/* It is the size in bytes; we want words */
   2451 		sc->sc_nvm_wordsize /= 2;
   2452 		/* Assume 2 banks */
   2453 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
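         		/*
         		 * Worked example (assuming NVM_SIZE_MULTIPLIER is 4096
         		 * bytes): a strap field of 7 gives (7 + 1) * 4096 = 32768
         		 * bytes, i.e. a 16384-word NVM in two 8192-word banks.
         		 */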
   2454 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2455 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2456 		sc->phy.release = wm_put_swflag_ich8lan;
   2457 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2458 		sc->nvm.release = wm_put_nvm_ich8lan;
   2459 		break;
   2460 	case WM_T_I210:
   2461 	case WM_T_I211:
    2462 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2463 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2464 		if (wm_nvm_flash_presence_i210(sc)) {
   2465 			sc->nvm.read = wm_nvm_read_eerd;
   2466 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2467 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2468 			wm_nvm_set_addrbits_size_eecd(sc);
   2469 		} else {
   2470 			sc->nvm.read = wm_nvm_read_invm;
   2471 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2472 			sc->sc_nvm_wordsize = INVM_SIZE;
   2473 		}
   2474 		sc->phy.acquire = wm_get_phy_82575;
   2475 		sc->phy.release = wm_put_phy_82575;
   2476 		sc->nvm.acquire = wm_get_nvm_80003;
   2477 		sc->nvm.release = wm_put_nvm_80003;
   2478 		break;
   2479 	default:
   2480 		break;
   2481 	}
   2482 
   2483 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2484 	switch (sc->sc_type) {
   2485 	case WM_T_82571:
   2486 	case WM_T_82572:
   2487 		reg = CSR_READ(sc, WMREG_SWSM2);
   2488 		if ((reg & SWSM2_LOCK) == 0) {
   2489 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2490 			force_clear_smbi = true;
   2491 		} else
   2492 			force_clear_smbi = false;
   2493 		break;
   2494 	case WM_T_82573:
   2495 	case WM_T_82574:
   2496 	case WM_T_82583:
   2497 		force_clear_smbi = true;
   2498 		break;
   2499 	default:
   2500 		force_clear_smbi = false;
   2501 		break;
   2502 	}
   2503 	if (force_clear_smbi) {
   2504 		reg = CSR_READ(sc, WMREG_SWSM);
   2505 		if ((reg & SWSM_SMBI) != 0)
   2506 			aprint_error_dev(sc->sc_dev,
   2507 			    "Please update the Bootagent\n");
   2508 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2509 	}
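         	/*
         	 * SWSM_SMBI is the firmware/software NVM semaphore bit; a stale
         	 * SMBI left set by an old boot agent would make the first
         	 * semaphore acquisition fail, hence the forced clear above.
         	 */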
   2510 
   2511 	/*
    2512 	 * Defer printing the EEPROM type until after verifying the checksum.
   2513 	 * This allows the EEPROM type to be printed correctly in the case
   2514 	 * that no EEPROM is attached.
   2515 	 */
   2516 	/*
   2517 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2518 	 * this for later, so we can fail future reads from the EEPROM.
   2519 	 */
   2520 	if (wm_nvm_validate_checksum(sc)) {
   2521 		/*
    2522 		 * Retry the read, because some PCI-e parts fail the
    2523 		 * first check due to the link being in a sleep state.
   2524 		 */
   2525 		if (wm_nvm_validate_checksum(sc))
   2526 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2527 	}
   2528 
   2529 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2530 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2531 	else {
   2532 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2533 		    sc->sc_nvm_wordsize);
   2534 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2535 			aprint_verbose("iNVM");
   2536 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2537 			aprint_verbose("FLASH(HW)");
   2538 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2539 			aprint_verbose("FLASH");
   2540 		else {
   2541 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2542 				eetype = "SPI";
   2543 			else
   2544 				eetype = "MicroWire";
   2545 			aprint_verbose("(%d address bits) %s EEPROM",
   2546 			    sc->sc_nvm_addrbits, eetype);
   2547 		}
   2548 	}
   2549 	wm_nvm_version(sc);
   2550 	aprint_verbose("\n");
   2551 
   2552 	/*
    2553 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2554 	 * might be incorrect.
   2555 	 */
   2556 	wm_gmii_setup_phytype(sc, 0, 0);
   2557 
   2558 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2559 	switch (sc->sc_type) {
   2560 	case WM_T_ICH8:
   2561 	case WM_T_ICH9:
   2562 	case WM_T_ICH10:
   2563 	case WM_T_PCH:
   2564 	case WM_T_PCH2:
   2565 	case WM_T_PCH_LPT:
   2566 	case WM_T_PCH_SPT:
   2567 	case WM_T_PCH_CNP:
   2568 		apme_mask = WUC_APME;
   2569 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2570 		if ((eeprom_data & apme_mask) != 0)
   2571 			sc->sc_flags |= WM_F_WOL;
   2572 		break;
   2573 	default:
   2574 		break;
   2575 	}
   2576 
   2577 	/* Reset the chip to a known state. */
   2578 	wm_reset(sc);
   2579 
   2580 	/*
   2581 	 * Check for I21[01] PLL workaround.
   2582 	 *
   2583 	 * Three cases:
   2584 	 * a) Chip is I211.
   2585 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2586 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2587 	 */
   2588 	if (sc->sc_type == WM_T_I211)
   2589 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2590 	if (sc->sc_type == WM_T_I210) {
   2591 		if (!wm_nvm_flash_presence_i210(sc))
   2592 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2593 		else if ((sc->sc_nvm_ver_major < 3)
   2594 		    || ((sc->sc_nvm_ver_major == 3)
   2595 			&& (sc->sc_nvm_ver_minor < 25))) {
   2596 			aprint_verbose_dev(sc->sc_dev,
   2597 			    "ROM image version %d.%d is older than 3.25\n",
   2598 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2599 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2600 		}
   2601 	}
   2602 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2603 		wm_pll_workaround_i210(sc);
   2604 
   2605 	wm_get_wakeup(sc);
   2606 
   2607 	/* Non-AMT based hardware can now take control from firmware */
   2608 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2609 		wm_get_hw_control(sc);
   2610 
   2611 	/*
    2612 	 * Read the Ethernet address from the EEPROM, unless it was
    2613 	 * already found in the device properties.
   2614 	 */
   2615 	ea = prop_dictionary_get(dict, "mac-address");
   2616 	if (ea != NULL) {
   2617 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2618 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2619 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2620 	} else {
   2621 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2622 			aprint_error_dev(sc->sc_dev,
   2623 			    "unable to read Ethernet address\n");
   2624 			goto out;
   2625 		}
   2626 	}
   2627 
   2628 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2629 	    ether_sprintf(enaddr));
   2630 
   2631 	/*
   2632 	 * Read the config info from the EEPROM, and set up various
   2633 	 * bits in the control registers based on their contents.
   2634 	 */
   2635 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2636 	if (pn != NULL) {
   2637 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2638 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2639 	} else {
   2640 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2641 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2642 			goto out;
   2643 		}
   2644 	}
   2645 
   2646 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2647 	if (pn != NULL) {
   2648 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2649 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2650 	} else {
   2651 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2652 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2653 			goto out;
   2654 		}
   2655 	}
   2656 
   2657 	/* check for WM_F_WOL */
   2658 	switch (sc->sc_type) {
   2659 	case WM_T_82542_2_0:
   2660 	case WM_T_82542_2_1:
   2661 	case WM_T_82543:
   2662 		/* dummy? */
   2663 		eeprom_data = 0;
   2664 		apme_mask = NVM_CFG3_APME;
   2665 		break;
   2666 	case WM_T_82544:
   2667 		apme_mask = NVM_CFG2_82544_APM_EN;
   2668 		eeprom_data = cfg2;
   2669 		break;
   2670 	case WM_T_82546:
   2671 	case WM_T_82546_3:
   2672 	case WM_T_82571:
   2673 	case WM_T_82572:
   2674 	case WM_T_82573:
   2675 	case WM_T_82574:
   2676 	case WM_T_82583:
   2677 	case WM_T_80003:
   2678 	case WM_T_82575:
   2679 	case WM_T_82576:
   2680 		apme_mask = NVM_CFG3_APME;
   2681 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2682 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2683 		break;
   2684 	case WM_T_82580:
   2685 	case WM_T_I350:
   2686 	case WM_T_I354:
   2687 	case WM_T_I210:
   2688 	case WM_T_I211:
   2689 		apme_mask = NVM_CFG3_APME;
   2690 		wm_nvm_read(sc,
   2691 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2692 		    1, &eeprom_data);
   2693 		break;
   2694 	case WM_T_ICH8:
   2695 	case WM_T_ICH9:
   2696 	case WM_T_ICH10:
   2697 	case WM_T_PCH:
   2698 	case WM_T_PCH2:
   2699 	case WM_T_PCH_LPT:
   2700 	case WM_T_PCH_SPT:
   2701 	case WM_T_PCH_CNP:
    2702 		/* Already checked before wm_reset() */
   2703 		apme_mask = eeprom_data = 0;
   2704 		break;
   2705 	default: /* XXX 82540 */
   2706 		apme_mask = NVM_CFG3_APME;
   2707 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2708 		break;
   2709 	}
    2710 	/* Check for the WM_F_WOL flag now that the EEPROM has been read */
   2711 	if ((eeprom_data & apme_mask) != 0)
   2712 		sc->sc_flags |= WM_F_WOL;
   2713 
   2714 	/*
    2715 	 * We have the EEPROM settings; now apply the special cases
    2716 	 * where the EEPROM may be wrong or the board won't support
    2717 	 * wake-on-LAN on a particular port.
   2718 	 */
   2719 	switch (sc->sc_pcidevid) {
   2720 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2721 		sc->sc_flags &= ~WM_F_WOL;
   2722 		break;
   2723 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2724 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2725 		/* Wake events are only supported on port A for dual-fiber
    2726 		 * adapters, regardless of the EEPROM setting. */
   2727 		if (sc->sc_funcid == 1)
   2728 			sc->sc_flags &= ~WM_F_WOL;
   2729 		break;
   2730 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2731 		/* If quad port adapter, disable WoL on all but port A */
   2732 		if (sc->sc_funcid != 0)
   2733 			sc->sc_flags &= ~WM_F_WOL;
   2734 		break;
   2735 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2736 		/* Wake events are only supported on port A for dual-fiber
    2737 		 * adapters, regardless of the EEPROM setting. */
   2738 		if (sc->sc_funcid == 1)
   2739 			sc->sc_flags &= ~WM_F_WOL;
   2740 		break;
   2741 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2742 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2743 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2744 		/* If quad port adapter, disable WoL on all but port A */
   2745 		if (sc->sc_funcid != 0)
   2746 			sc->sc_flags &= ~WM_F_WOL;
   2747 		break;
   2748 	}
   2749 
   2750 	if (sc->sc_type >= WM_T_82575) {
   2751 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2752 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2753 			    nvmword);
   2754 			if ((sc->sc_type == WM_T_82575) ||
   2755 			    (sc->sc_type == WM_T_82576)) {
   2756 				/* Check NVM for autonegotiation */
   2757 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2758 				    != 0)
   2759 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2760 			}
   2761 			if ((sc->sc_type == WM_T_82575) ||
   2762 			    (sc->sc_type == WM_T_I350)) {
   2763 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2764 					sc->sc_flags |= WM_F_MAS;
   2765 			}
   2766 		}
   2767 	}
   2768 
   2769 	/*
    2770 	 * XXX Some multiple-port cards need special handling
    2771 	 * to disable a particular port.
   2772 	 */
   2773 
   2774 	if (sc->sc_type >= WM_T_82544) {
   2775 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2776 		if (pn != NULL) {
   2777 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2778 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2779 		} else {
   2780 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2781 				aprint_error_dev(sc->sc_dev,
   2782 				    "unable to read SWDPIN\n");
   2783 				goto out;
   2784 			}
   2785 		}
   2786 	}
   2787 
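         	/* ILOS inverts the sense of the loss-of-signal input. */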
   2788 	if (cfg1 & NVM_CFG1_ILOS)
   2789 		sc->sc_ctrl |= CTRL_ILOS;
   2790 
   2791 	/*
   2792 	 * XXX
    2793 	 * This code isn't correct because pins 2 and 3 are located
    2794 	 * at different positions on newer chips. Check all datasheets.
    2795 	 *
    2796 	 * Until this is resolved, apply it only to chips up to the 82580.
   2797 	 */
   2798 	if (sc->sc_type <= WM_T_82580) {
   2799 		if (sc->sc_type >= WM_T_82544) {
   2800 			sc->sc_ctrl |=
   2801 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2802 			    CTRL_SWDPIO_SHIFT;
   2803 			sc->sc_ctrl |=
   2804 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2805 			    CTRL_SWDPINS_SHIFT;
   2806 		} else {
   2807 			sc->sc_ctrl |=
   2808 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2809 			    CTRL_SWDPIO_SHIFT;
   2810 		}
   2811 	}
   2812 
   2813 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2814 		wm_nvm_read(sc,
   2815 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2816 		    1, &nvmword);
   2817 		if (nvmword & NVM_CFG3_ILOS)
   2818 			sc->sc_ctrl |= CTRL_ILOS;
   2819 	}
   2820 
   2821 #if 0
   2822 	if (sc->sc_type >= WM_T_82544) {
   2823 		if (cfg1 & NVM_CFG1_IPS0)
   2824 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2825 		if (cfg1 & NVM_CFG1_IPS1)
   2826 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2827 		sc->sc_ctrl_ext |=
   2828 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2829 		    CTRL_EXT_SWDPIO_SHIFT;
   2830 		sc->sc_ctrl_ext |=
   2831 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2832 		    CTRL_EXT_SWDPINS_SHIFT;
   2833 	} else {
   2834 		sc->sc_ctrl_ext |=
   2835 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2836 		    CTRL_EXT_SWDPIO_SHIFT;
   2837 	}
   2838 #endif
   2839 
   2840 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2841 #if 0
   2842 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2843 #endif
   2844 
   2845 	if (sc->sc_type == WM_T_PCH) {
   2846 		uint16_t val;
   2847 
   2848 		/* Save the NVM K1 bit setting */
   2849 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2850 
   2851 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2852 			sc->sc_nvm_k1_enabled = 1;
   2853 		else
   2854 			sc->sc_nvm_k1_enabled = 0;
   2855 	}
   2856 
    2857 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2858 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2859 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2860 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2861 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2862 	    || sc->sc_type == WM_T_82573
   2863 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2864 		/* Copper only */
   2865 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2866 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2867 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2868 	    || (sc->sc_type == WM_T_I211)) {
   2869 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2870 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2871 		switch (link_mode) {
   2872 		case CTRL_EXT_LINK_MODE_1000KX:
   2873 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2874 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2875 			break;
   2876 		case CTRL_EXT_LINK_MODE_SGMII:
   2877 			if (wm_sgmii_uses_mdio(sc)) {
   2878 				aprint_normal_dev(sc->sc_dev,
   2879 				    "SGMII(MDIO)\n");
   2880 				sc->sc_flags |= WM_F_SGMII;
   2881 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2882 				break;
   2883 			}
   2884 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2885 			/*FALLTHROUGH*/
   2886 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2887 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2888 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2889 				if (link_mode
   2890 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2891 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2892 					sc->sc_flags |= WM_F_SGMII;
   2893 					aprint_verbose_dev(sc->sc_dev,
   2894 					    "SGMII\n");
   2895 				} else {
   2896 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2897 					aprint_verbose_dev(sc->sc_dev,
   2898 					    "SERDES\n");
   2899 				}
   2900 				break;
   2901 			}
   2902 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2903 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2904 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2905 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2906 				sc->sc_flags |= WM_F_SGMII;
   2907 			}
   2908 			/* Do not change link mode for 100BaseFX */
   2909 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2910 				break;
   2911 
   2912 			/* Change current link mode setting */
   2913 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2914 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2915 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2916 			else
   2917 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2918 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2919 			break;
   2920 		case CTRL_EXT_LINK_MODE_GMII:
   2921 		default:
   2922 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2923 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2924 			break;
   2925 		}
   2926 
    2927 		/* Enable the I2C interface only in SGMII mode. */
   2928 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2929 			reg |= CTRL_EXT_I2C_ENA;
   2930 		else
   2931 			reg &= ~CTRL_EXT_I2C_ENA;
   2932 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2933 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2934 			if (!wm_sgmii_uses_mdio(sc))
   2935 				wm_gmii_setup_phytype(sc, 0, 0);
   2936 			wm_reset_mdicnfg_82580(sc);
   2937 		}
   2938 	} else if (sc->sc_type < WM_T_82543 ||
   2939 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2940 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2941 			aprint_error_dev(sc->sc_dev,
   2942 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2943 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2944 		}
   2945 	} else {
   2946 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2947 			aprint_error_dev(sc->sc_dev,
   2948 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2949 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2950 		}
   2951 	}
   2952 
   2953 	if (sc->sc_type >= WM_T_PCH2)
   2954 		sc->sc_flags |= WM_F_EEE;
   2955 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2956 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2957 		/* XXX: Need special handling for I354. (not yet) */
   2958 		if (sc->sc_type != WM_T_I354)
   2959 			sc->sc_flags |= WM_F_EEE;
   2960 	}
   2961 
   2962 	/*
   2963 	 * The I350 has a bug where it always strips the CRC whether
    2964 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   2965 	 */
   2966 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2967 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2968 		sc->sc_flags |= WM_F_CRC_STRIP;
   2969 
   2970 	/* Set device properties (macflags) */
   2971 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2972 
   2973 	if (sc->sc_flags != 0) {
   2974 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2975 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2976 	}
   2977 
   2978 #ifdef WM_MPSAFE
   2979 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2980 #else
   2981 	sc->sc_core_lock = NULL;
   2982 #endif
   2983 
   2984 	/* Initialize the media structures accordingly. */
   2985 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2986 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2987 	else
   2988 		wm_tbi_mediainit(sc); /* All others */
   2989 
   2990 	ifp = &sc->sc_ethercom.ec_if;
   2991 	xname = device_xname(sc->sc_dev);
   2992 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2993 	ifp->if_softc = sc;
   2994 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2995 #ifdef WM_MPSAFE
   2996 	ifp->if_extflags = IFEF_MPSAFE;
   2997 #endif
   2998 	ifp->if_ioctl = wm_ioctl;
   2999 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3000 		ifp->if_start = wm_nq_start;
   3001 		/*
   3002 		 * When the number of CPUs is one and the controller can use
    3003 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3004 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3005 		 * the other for link status changes.
   3006 		 * In this situation, wm_nq_transmit() is disadvantageous
   3007 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3008 		 */
   3009 		if (wm_is_using_multiqueue(sc))
   3010 			ifp->if_transmit = wm_nq_transmit;
   3011 	} else {
   3012 		ifp->if_start = wm_start;
   3013 		/*
    3014 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   3015 		 */
   3016 		if (wm_is_using_multiqueue(sc))
   3017 			ifp->if_transmit = wm_transmit;
   3018 	}
    3019 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   3020 	ifp->if_init = wm_init;
   3021 	ifp->if_stop = wm_stop;
   3022 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3023 	IFQ_SET_READY(&ifp->if_snd);
   3024 
   3025 	/* Check for jumbo frame */
   3026 	switch (sc->sc_type) {
   3027 	case WM_T_82573:
   3028 		/* XXX limited to 9234 if ASPM is disabled */
   3029 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3030 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3031 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3032 		break;
   3033 	case WM_T_82571:
   3034 	case WM_T_82572:
   3035 	case WM_T_82574:
   3036 	case WM_T_82583:
   3037 	case WM_T_82575:
   3038 	case WM_T_82576:
   3039 	case WM_T_82580:
   3040 	case WM_T_I350:
   3041 	case WM_T_I354:
   3042 	case WM_T_I210:
   3043 	case WM_T_I211:
   3044 	case WM_T_80003:
   3045 	case WM_T_ICH9:
   3046 	case WM_T_ICH10:
   3047 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3048 	case WM_T_PCH_LPT:
   3049 	case WM_T_PCH_SPT:
   3050 	case WM_T_PCH_CNP:
   3051 		/* XXX limited to 9234 */
   3052 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3053 		break;
   3054 	case WM_T_PCH:
   3055 		/* XXX limited to 4096 */
   3056 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3057 		break;
   3058 	case WM_T_82542_2_0:
   3059 	case WM_T_82542_2_1:
   3060 	case WM_T_ICH8:
   3061 		/* No support for jumbo frame */
   3062 		break;
   3063 	default:
   3064 		/* ETHER_MAX_LEN_JUMBO */
   3065 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3066 		break;
   3067 	}
   3068 
    3069 	/* If we're an i82543 or greater, we can support VLANs. */
   3070 	if (sc->sc_type >= WM_T_82543) {
   3071 		sc->sc_ethercom.ec_capabilities |=
   3072 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3073 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3074 	}
   3075 
   3076 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3077 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3078 
   3079 	/*
    3080 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   3081 	 * on i82543 and later.
   3082 	 */
   3083 	if (sc->sc_type >= WM_T_82543) {
   3084 		ifp->if_capabilities |=
   3085 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3086 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3087 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3088 		    IFCAP_CSUM_TCPv6_Tx |
   3089 		    IFCAP_CSUM_UDPv6_Tx;
   3090 	}
   3091 
   3092 	/*
    3093 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3094 	 *
   3095 	 *	82541GI (8086:1076) ... no
   3096 	 *	82572EI (8086:10b9) ... yes
   3097 	 */
   3098 	if (sc->sc_type >= WM_T_82571) {
   3099 		ifp->if_capabilities |=
   3100 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3101 	}
   3102 
   3103 	/*
    3104 	 * If we're an i82544 or greater (except i82547), we can do
   3105 	 * TCP segmentation offload.
   3106 	 */
   3107 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3108 		ifp->if_capabilities |= IFCAP_TSOv4;
   3109 	}
   3110 
   3111 	if (sc->sc_type >= WM_T_82571) {
   3112 		ifp->if_capabilities |= IFCAP_TSOv6;
   3113 	}
   3114 
   3115 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3116 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3117 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3118 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3119 
   3120 	/* Attach the interface. */
   3121 	if_initialize(ifp);
   3122 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3123 	ether_ifattach(ifp, enaddr);
   3124 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3125 	if_register(ifp);
   3126 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3127 	    RND_FLAG_DEFAULT);
   3128 
   3129 #ifdef WM_EVENT_COUNTERS
   3130 	/* Attach event counters. */
   3131 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3132 	    NULL, xname, "linkintr");
   3133 
   3134 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3135 	    NULL, xname, "tx_xoff");
   3136 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3137 	    NULL, xname, "tx_xon");
   3138 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3139 	    NULL, xname, "rx_xoff");
   3140 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3141 	    NULL, xname, "rx_xon");
   3142 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3143 	    NULL, xname, "rx_macctl");
   3144 #endif /* WM_EVENT_COUNTERS */
   3145 
   3146 	sc->sc_txrx_use_workqueue = false;
   3147 
   3148 	if (wm_phy_need_linkdown_discard(sc))
   3149 		wm_set_linkdown_discard(sc);
   3150 
   3151 	wm_init_sysctls(sc);
   3152 
   3153 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3154 		pmf_class_network_register(self, ifp);
   3155 	else
   3156 		aprint_error_dev(self, "couldn't establish power handler\n");
   3157 
   3158 	sc->sc_flags |= WM_F_ATTACHED;
   3159 out:
   3160 	return;
   3161 }
   3162 
   3163 /* The detach function (ca_detach) */
   3164 static int
   3165 wm_detach(device_t self, int flags __unused)
   3166 {
   3167 	struct wm_softc *sc = device_private(self);
   3168 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3169 	int i;
   3170 
   3171 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3172 		return 0;
   3173 
    3174 	/* Stop the interface. Callouts are stopped inside wm_stop(). */
   3175 	wm_stop(ifp, 1);
   3176 
   3177 	pmf_device_deregister(self);
   3178 
   3179 	sysctl_teardown(&sc->sc_sysctllog);
   3180 
   3181 #ifdef WM_EVENT_COUNTERS
   3182 	evcnt_detach(&sc->sc_ev_linkintr);
   3183 
   3184 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3185 	evcnt_detach(&sc->sc_ev_tx_xon);
   3186 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3187 	evcnt_detach(&sc->sc_ev_rx_xon);
   3188 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3189 #endif /* WM_EVENT_COUNTERS */
   3190 
   3191 	rnd_detach_source(&sc->rnd_source);
   3192 
   3193 	/* Tell the firmware about the release */
   3194 	WM_CORE_LOCK(sc);
   3195 	wm_release_manageability(sc);
   3196 	wm_release_hw_control(sc);
   3197 	wm_enable_wakeup(sc);
   3198 	WM_CORE_UNLOCK(sc);
   3199 
   3200 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3201 
   3202 	ether_ifdetach(ifp);
   3203 	if_detach(ifp);
   3204 	if_percpuq_destroy(sc->sc_ipq);
   3205 
   3206 	/* Delete all remaining media. */
   3207 	ifmedia_fini(&sc->sc_mii.mii_media);
   3208 
   3209 	/* Unload RX dmamaps and free mbufs */
   3210 	for (i = 0; i < sc->sc_nqueues; i++) {
   3211 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3212 		mutex_enter(rxq->rxq_lock);
   3213 		wm_rxdrain(rxq);
   3214 		mutex_exit(rxq->rxq_lock);
   3215 	}
   3216 	/* Must unlock here */
   3217 
    3218 	/* Disestablish the interrupt handlers */
   3219 	for (i = 0; i < sc->sc_nintrs; i++) {
   3220 		if (sc->sc_ihs[i] != NULL) {
   3221 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3222 			sc->sc_ihs[i] = NULL;
   3223 		}
   3224 	}
   3225 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3226 
    3227 	/* wm_stop() ensures the workqueue is stopped. */
   3228 	workqueue_destroy(sc->sc_queue_wq);
   3229 
   3230 	for (i = 0; i < sc->sc_nqueues; i++)
   3231 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3232 
   3233 	wm_free_txrx_queues(sc);
   3234 
   3235 	/* Unmap the registers */
   3236 	if (sc->sc_ss) {
   3237 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3238 		sc->sc_ss = 0;
   3239 	}
   3240 	if (sc->sc_ios) {
   3241 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3242 		sc->sc_ios = 0;
   3243 	}
   3244 	if (sc->sc_flashs) {
   3245 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3246 		sc->sc_flashs = 0;
   3247 	}
   3248 
   3249 	if (sc->sc_core_lock)
   3250 		mutex_obj_free(sc->sc_core_lock);
   3251 	if (sc->sc_ich_phymtx)
   3252 		mutex_obj_free(sc->sc_ich_phymtx);
   3253 	if (sc->sc_ich_nvmmtx)
   3254 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3255 
   3256 	return 0;
   3257 }
   3258 
   3259 static bool
   3260 wm_suspend(device_t self, const pmf_qual_t *qual)
   3261 {
   3262 	struct wm_softc *sc = device_private(self);
   3263 
   3264 	wm_release_manageability(sc);
   3265 	wm_release_hw_control(sc);
   3266 	wm_enable_wakeup(sc);
   3267 
   3268 	return true;
   3269 }
   3270 
   3271 static bool
   3272 wm_resume(device_t self, const pmf_qual_t *qual)
   3273 {
   3274 	struct wm_softc *sc = device_private(self);
   3275 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3276 	pcireg_t reg;
   3277 	char buf[256];
   3278 
   3279 	reg = CSR_READ(sc, WMREG_WUS);
   3280 	if (reg != 0) {
   3281 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3282 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3283 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3284 	}
   3285 
   3286 	if (sc->sc_type >= WM_T_PCH2)
   3287 		wm_resume_workarounds_pchlan(sc);
   3288 	if ((ifp->if_flags & IFF_UP) == 0) {
   3289 		wm_reset(sc);
   3290 		/* Non-AMT based hardware can now take control from firmware */
   3291 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3292 			wm_get_hw_control(sc);
   3293 		wm_init_manageability(sc);
   3294 	} else {
   3295 		/*
   3296 		 * We called pmf_class_network_register(), so if_init() is
   3297 		 * automatically called when IFF_UP. wm_reset(),
   3298 		 * wm_get_hw_control() and wm_init_manageability() are called
   3299 		 * via wm_init().
   3300 		 */
   3301 	}
   3302 
   3303 	return true;
   3304 }
   3305 
   3306 /*
   3307  * wm_watchdog:		[ifnet interface function]
   3308  *
   3309  *	Watchdog timer handler.
   3310  */
   3311 static void
   3312 wm_watchdog(struct ifnet *ifp)
   3313 {
   3314 	int qid;
   3315 	struct wm_softc *sc = ifp->if_softc;
    3316 	uint16_t hang_queue = 0; /* 16 bits suffice; wm(4)'s max is the 82576's 16 queues. */
   3317 
   3318 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3319 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3320 
   3321 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3322 	}
   3323 
    3324 	/* If any queue hung up, reset the interface. */
   3325 	if (hang_queue != 0) {
   3326 		(void)wm_init(ifp);
   3327 
   3328 		/*
    3329 		 * There is still some upper-layer processing which calls
    3330 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3331 		 */
   3332 		/* Try to get more packets going. */
   3333 		ifp->if_start(ifp);
   3334 	}
   3335 }
   3336 
   3337 
   3338 static void
   3339 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3340 {
   3341 
   3342 	mutex_enter(txq->txq_lock);
   3343 	if (txq->txq_sending &&
   3344 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3345 		wm_watchdog_txq_locked(ifp, txq, hang);
   3346 
   3347 	mutex_exit(txq->txq_lock);
   3348 }
   3349 
   3350 static void
   3351 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3352     uint16_t *hang)
   3353 {
   3354 	struct wm_softc *sc = ifp->if_softc;
   3355 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3356 
   3357 	KASSERT(mutex_owned(txq->txq_lock));
   3358 
   3359 	/*
   3360 	 * Since we're using delayed interrupts, sweep up
   3361 	 * before we report an error.
   3362 	 */
   3363 	wm_txeof(txq, UINT_MAX);
   3364 
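         	/* If the queue is still sending after the sweep, mark it as hung. */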
   3365 	if (txq->txq_sending)
   3366 		*hang |= __BIT(wmq->wmq_id);
   3367 
   3368 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3369 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3370 		    device_xname(sc->sc_dev));
   3371 	} else {
   3372 #ifdef WM_DEBUG
   3373 		int i, j;
   3374 		struct wm_txsoft *txs;
   3375 #endif
   3376 		log(LOG_ERR,
   3377 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3378 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3379 		    txq->txq_next);
   3380 		if_statinc(ifp, if_oerrors);
   3381 #ifdef WM_DEBUG
   3382 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3383 		    i = WM_NEXTTXS(txq, i)) {
   3384 			txs = &txq->txq_soft[i];
   3385 			printf("txs %d tx %d -> %d\n",
   3386 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3387 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3388 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3389 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3390 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3391 					printf("\t %#08x%08x\n",
   3392 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3393 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3394 				} else {
   3395 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3396 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3397 					    txq->txq_descs[j].wtx_addr.wa_low);
   3398 					printf("\t %#04x%02x%02x%08x\n",
   3399 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3400 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3401 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3402 					    txq->txq_descs[j].wtx_cmdlen);
   3403 				}
   3404 				if (j == txs->txs_lastdesc)
   3405 					break;
   3406 			}
   3407 		}
   3408 #endif
   3409 	}
   3410 }
   3411 
   3412 /*
   3413  * wm_tick:
   3414  *
   3415  *	One second timer, used to check link status, sweep up
   3416  *	completed transmit jobs, etc.
   3417  */
   3418 static void
   3419 wm_tick(void *arg)
   3420 {
   3421 	struct wm_softc *sc = arg;
   3422 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3423 #ifndef WM_MPSAFE
   3424 	int s = splnet();
   3425 #endif
   3426 
   3427 	WM_CORE_LOCK(sc);
   3428 
   3429 	if (sc->sc_core_stopping) {
   3430 		WM_CORE_UNLOCK(sc);
   3431 #ifndef WM_MPSAFE
   3432 		splx(s);
   3433 #endif
   3434 		return;
   3435 	}
   3436 
   3437 	if (sc->sc_type >= WM_T_82542_2_1) {
   3438 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3439 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3440 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3441 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3442 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3443 	}
   3444 
   3445 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3446 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3447 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3448 	    + CSR_READ(sc, WMREG_CRCERRS)
   3449 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3450 	    + CSR_READ(sc, WMREG_SYMERRC)
   3451 	    + CSR_READ(sc, WMREG_RXERRC)
   3452 	    + CSR_READ(sc, WMREG_SEC)
   3453 	    + CSR_READ(sc, WMREG_CEXTERR)
   3454 	    + CSR_READ(sc, WMREG_RLEC));
   3455 	/*
    3456 	 * WMREG_RNBC is incremented when no receive buffers are available
    3457 	 * in host memory. It is not the number of dropped packets: the
    3458 	 * Ethernet controller can still receive packets in that case as
    3459 	 * long as there is space in the PHY's FIFO.
    3460 	 *
    3461 	 * If you want to count WMREG_RNBC events, use your own EVCNT
    3462 	 * instead of if_iqdrops.
   3463 	 */
   3464 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3465 	IF_STAT_PUTREF(ifp);
   3466 
   3467 	if (sc->sc_flags & WM_F_HAS_MII)
   3468 		mii_tick(&sc->sc_mii);
   3469 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3470 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3471 		wm_serdes_tick(sc);
   3472 	else
   3473 		wm_tbi_tick(sc);
   3474 
   3475 	WM_CORE_UNLOCK(sc);
   3476 
   3477 	wm_watchdog(ifp);
   3478 
   3479 	callout_schedule(&sc->sc_tick_ch, hz);
   3480 }
   3481 
   3482 static int
   3483 wm_ifflags_cb(struct ethercom *ec)
   3484 {
   3485 	struct ifnet *ifp = &ec->ec_if;
   3486 	struct wm_softc *sc = ifp->if_softc;
   3487 	u_short iffchange;
   3488 	int ecchange;
   3489 	bool needreset = false;
   3490 	int rc = 0;
   3491 
   3492 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3493 		device_xname(sc->sc_dev), __func__));
   3494 
   3495 	WM_CORE_LOCK(sc);
   3496 
   3497 	/*
   3498 	 * Check for if_flags.
    3499 	 * The main usage is to prevent link down when opening bpf.
   3500 	 */
   3501 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3502 	sc->sc_if_flags = ifp->if_flags;
   3503 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3504 		needreset = true;
   3505 		goto ec;
   3506 	}
   3507 
    3508 	/* if_flags related updates */
   3509 	if ((iffchange & IFF_PROMISC) != 0)
   3510 		wm_set_filter(sc);
   3511 
   3512 	wm_set_vlan(sc);
   3513 
   3514 ec:
   3515 	/* Check for ec_capenable. */
   3516 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3517 	sc->sc_ec_capenable = ec->ec_capenable;
   3518 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3519 		needreset = true;
   3520 		goto out;
   3521 	}
   3522 
   3523 	/* ec related updates */
   3524 	wm_set_eee(sc);
   3525 
   3526 out:
   3527 	if (needreset)
   3528 		rc = ENETRESET;
   3529 	WM_CORE_UNLOCK(sc);
   3530 
   3531 	return rc;
   3532 }
   3533 
   3534 static bool
   3535 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3536 {
   3537 
   3538 	switch (sc->sc_phytype) {
   3539 	case WMPHY_82577: /* ihphy */
   3540 	case WMPHY_82578: /* atphy */
   3541 	case WMPHY_82579: /* ihphy */
   3542 	case WMPHY_I217: /* ihphy */
   3543 	case WMPHY_82580: /* ihphy */
   3544 	case WMPHY_I350: /* ihphy */
   3545 		return true;
   3546 	default:
   3547 		return false;
   3548 	}
   3549 }
   3550 
   3551 static void
   3552 wm_set_linkdown_discard(struct wm_softc *sc)
   3553 {
   3554 
   3555 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3556 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3557 
   3558 		mutex_enter(txq->txq_lock);
   3559 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3560 		mutex_exit(txq->txq_lock);
   3561 	}
   3562 }
   3563 
   3564 static void
   3565 wm_clear_linkdown_discard(struct wm_softc *sc)
   3566 {
   3567 
   3568 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3569 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3570 
   3571 		mutex_enter(txq->txq_lock);
   3572 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3573 		mutex_exit(txq->txq_lock);
   3574 	}
   3575 }
   3576 
   3577 /*
   3578  * wm_ioctl:		[ifnet interface function]
   3579  *
   3580  *	Handle control requests from the operator.
   3581  */
   3582 static int
   3583 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3584 {
   3585 	struct wm_softc *sc = ifp->if_softc;
   3586 	struct ifreq *ifr = (struct ifreq *)data;
   3587 	struct ifaddr *ifa = (struct ifaddr *)data;
   3588 	struct sockaddr_dl *sdl;
   3589 	int s, error;
   3590 
   3591 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3592 		device_xname(sc->sc_dev), __func__));
   3593 
   3594 #ifndef WM_MPSAFE
   3595 	s = splnet();
   3596 #endif
   3597 	switch (cmd) {
   3598 	case SIOCSIFMEDIA:
   3599 		WM_CORE_LOCK(sc);
   3600 		/* Flow control requires full-duplex mode. */
   3601 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3602 		    (ifr->ifr_media & IFM_FDX) == 0)
   3603 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3604 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3605 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3606 				/* We can do both TXPAUSE and RXPAUSE. */
   3607 				ifr->ifr_media |=
   3608 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3609 			}
   3610 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3611 		}
   3612 		WM_CORE_UNLOCK(sc);
   3613 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3614 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3615 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
   3616 				wm_set_linkdown_discard(sc);
   3617 			else
   3618 				wm_clear_linkdown_discard(sc);
   3619 		}
   3620 		break;
   3621 	case SIOCINITIFADDR:
   3622 		WM_CORE_LOCK(sc);
   3623 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3624 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3625 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3626 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3627 			/* Unicast address is the first multicast entry */
   3628 			wm_set_filter(sc);
   3629 			error = 0;
   3630 			WM_CORE_UNLOCK(sc);
   3631 			break;
   3632 		}
   3633 		WM_CORE_UNLOCK(sc);
    3634 		if (((ifp->if_flags & IFF_UP) == 0) &&
         		    wm_phy_need_linkdown_discard(sc))
   3635 			wm_clear_linkdown_discard(sc);
   3636 		/*FALLTHROUGH*/
   3637 	default:
   3638 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
    3639 			if (((ifp->if_flags & IFF_UP) == 0) &&
         			    ((ifr->ifr_flags & IFF_UP) != 0)) {
   3640 				wm_clear_linkdown_discard(sc);
    3641 			} else if (((ifp->if_flags & IFF_UP) != 0) &&
         			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   3642 				wm_set_linkdown_discard(sc);
   3643 			}
   3644 		}
   3645 #ifdef WM_MPSAFE
   3646 		s = splnet();
   3647 #endif
   3648 		/* It may call wm_start, so unlock here */
   3649 		error = ether_ioctl(ifp, cmd, data);
   3650 #ifdef WM_MPSAFE
   3651 		splx(s);
   3652 #endif
   3653 		if (error != ENETRESET)
   3654 			break;
   3655 
   3656 		error = 0;
   3657 
   3658 		if (cmd == SIOCSIFCAP)
   3659 			error = (*ifp->if_init)(ifp);
   3660 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3661 			;
   3662 		else if (ifp->if_flags & IFF_RUNNING) {
   3663 			/*
   3664 			 * Multicast list has changed; set the hardware filter
   3665 			 * accordingly.
   3666 			 */
   3667 			WM_CORE_LOCK(sc);
   3668 			wm_set_filter(sc);
   3669 			WM_CORE_UNLOCK(sc);
   3670 		}
   3671 		break;
   3672 	}
   3673 
   3674 #ifndef WM_MPSAFE
   3675 	splx(s);
   3676 #endif
   3677 	return error;
   3678 }
   3679 
   3680 /* MAC address related */
   3681 
   3682 /*
    3683  * Get the offset of the MAC address and return it.
    3684  * If an error occurs, use offset 0.
   3685  */
   3686 static uint16_t
   3687 wm_check_alt_mac_addr(struct wm_softc *sc)
   3688 {
   3689 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3690 	uint16_t offset = NVM_OFF_MACADDR;
   3691 
   3692 	/* Try to read alternative MAC address pointer */
   3693 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3694 		return 0;
   3695 
    3696 	/* Check whether the pointer is valid. */
   3697 	if ((offset == 0x0000) || (offset == 0xffff))
   3698 		return 0;
   3699 
   3700 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3701 	/*
    3702 	 * Check whether the alternative MAC address is valid.
    3703 	 * Some cards have a pointer other than 0xffff but don't
    3704 	 * actually use an alternative MAC address.
    3705 	 *
    3706 	 * Check that the multicast (group) bit is not set.
   3707 	 */
   3708 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3709 		if (((myea[0] & 0xff) & 0x01) == 0)
   3710 			return offset; /* Found */
   3711 
   3712 	/* Not found */
   3713 	return 0;
   3714 }
   3715 
   3716 static int
   3717 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3718 {
   3719 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3720 	uint16_t offset = NVM_OFF_MACADDR;
   3721 	int do_invert = 0;
   3722 
   3723 	switch (sc->sc_type) {
   3724 	case WM_T_82580:
   3725 	case WM_T_I350:
   3726 	case WM_T_I354:
   3727 		/* EEPROM Top Level Partitioning */
   3728 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3729 		break;
   3730 	case WM_T_82571:
   3731 	case WM_T_82575:
   3732 	case WM_T_82576:
   3733 	case WM_T_80003:
   3734 	case WM_T_I210:
   3735 	case WM_T_I211:
   3736 		offset = wm_check_alt_mac_addr(sc);
   3737 		if (offset == 0)
   3738 			if ((sc->sc_funcid & 0x01) == 1)
   3739 				do_invert = 1;
   3740 		break;
   3741 	default:
   3742 		if ((sc->sc_funcid & 0x01) == 1)
   3743 			do_invert = 1;
   3744 		break;
   3745 	}
   3746 
   3747 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3748 		goto bad;
   3749 
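         	/* Each 16-bit NVM word holds two address octets, low byte first. */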
   3750 	enaddr[0] = myea[0] & 0xff;
   3751 	enaddr[1] = myea[0] >> 8;
   3752 	enaddr[2] = myea[1] & 0xff;
   3753 	enaddr[3] = myea[1] >> 8;
   3754 	enaddr[4] = myea[2] & 0xff;
   3755 	enaddr[5] = myea[2] >> 8;
   3756 
   3757 	/*
   3758 	 * Toggle the LSB of the MAC address on the second port
   3759 	 * of some dual port cards.
   3760 	 */
   3761 	if (do_invert != 0)
   3762 		enaddr[5] ^= 1;
   3763 
   3764 	return 0;
   3765 
   3766  bad:
   3767 	return -1;
   3768 }
   3769 
   3770 /*
   3771  * wm_set_ral:
   3772  *
    3773  *	Set an entry in the receive address list.
   3774  */
   3775 static void
   3776 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3777 {
   3778 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3779 	uint32_t wlock_mac;
   3780 	int rv;
   3781 
   3782 	if (enaddr != NULL) {
   3783 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3784 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3785 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
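         		/* RAL_AV marks the receive address entry as valid. */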
   3786 		ral_hi |= RAL_AV;
   3787 	} else {
   3788 		ral_lo = 0;
   3789 		ral_hi = 0;
   3790 	}
   3791 
   3792 	switch (sc->sc_type) {
   3793 	case WM_T_82542_2_0:
   3794 	case WM_T_82542_2_1:
   3795 	case WM_T_82543:
   3796 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3797 		CSR_WRITE_FLUSH(sc);
   3798 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3799 		CSR_WRITE_FLUSH(sc);
   3800 		break;
   3801 	case WM_T_PCH2:
   3802 	case WM_T_PCH_LPT:
   3803 	case WM_T_PCH_SPT:
   3804 	case WM_T_PCH_CNP:
   3805 		if (idx == 0) {
   3806 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3807 			CSR_WRITE_FLUSH(sc);
   3808 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3809 			CSR_WRITE_FLUSH(sc);
   3810 			return;
   3811 		}
   3812 		if (sc->sc_type != WM_T_PCH2) {
   3813 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3814 			    FWSM_WLOCK_MAC);
   3815 			addrl = WMREG_SHRAL(idx - 1);
   3816 			addrh = WMREG_SHRAH(idx - 1);
   3817 		} else {
   3818 			wlock_mac = 0;
   3819 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3820 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3821 		}
   3822 
   3823 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3824 			rv = wm_get_swflag_ich8lan(sc);
   3825 			if (rv != 0)
   3826 				return;
   3827 			CSR_WRITE(sc, addrl, ral_lo);
   3828 			CSR_WRITE_FLUSH(sc);
   3829 			CSR_WRITE(sc, addrh, ral_hi);
   3830 			CSR_WRITE_FLUSH(sc);
   3831 			wm_put_swflag_ich8lan(sc);
   3832 		}
   3833 
   3834 		break;
   3835 	default:
   3836 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3837 		CSR_WRITE_FLUSH(sc);
   3838 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3839 		CSR_WRITE_FLUSH(sc);
   3840 		break;
   3841 	}
   3842 }
   3843 
   3844 /*
   3845  * wm_mchash:
   3846  *
   3847  *	Compute the hash of the multicast address for the 4096-bit
   3848  *	multicast filter.
   3849  */
   3850 static uint32_t
   3851 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3852 {
   3853 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3854 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3855 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3856 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3857 	uint32_t hash;
   3858 
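         	/*
         	 * The hash is built from address octets 4 and 5; sc_mchash_type
         	 * (0-3) selects one of four bit alignments.  ICH/PCH chips use
         	 * a 10-bit hash (1024-bit table), others a 12-bit hash (4096
         	 * bits).  E.g. with type 0 on a non-ICH chip the hash is
         	 * (enaddr[4] >> 4) | (enaddr[5] << 4), masked to 12 bits.
         	 */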
   3859 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3860 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3861 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3862 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3863 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3864 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3865 		return (hash & 0x3ff);
   3866 	}
   3867 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3868 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3869 
   3870 	return (hash & 0xfff);
   3871 }
   3872 
   3873 /*
    3874  * wm_rar_count:
    3875  *	Return the number of receive address (RAL/RAR) table entries.
   3876  */
   3877 static int
   3878 wm_rar_count(struct wm_softc *sc)
   3879 {
   3880 	int size;
   3881 
   3882 	switch (sc->sc_type) {
   3883 	case WM_T_ICH8:
    3884 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3885 		break;
   3886 	case WM_T_ICH9:
   3887 	case WM_T_ICH10:
   3888 	case WM_T_PCH:
   3889 		size = WM_RAL_TABSIZE_ICH8;
   3890 		break;
   3891 	case WM_T_PCH2:
   3892 		size = WM_RAL_TABSIZE_PCH2;
   3893 		break;
   3894 	case WM_T_PCH_LPT:
   3895 	case WM_T_PCH_SPT:
   3896 	case WM_T_PCH_CNP:
   3897 		size = WM_RAL_TABSIZE_PCH_LPT;
   3898 		break;
   3899 	case WM_T_82575:
   3900 	case WM_T_I210:
   3901 	case WM_T_I211:
   3902 		size = WM_RAL_TABSIZE_82575;
   3903 		break;
   3904 	case WM_T_82576:
   3905 	case WM_T_82580:
   3906 		size = WM_RAL_TABSIZE_82576;
   3907 		break;
   3908 	case WM_T_I350:
   3909 	case WM_T_I354:
   3910 		size = WM_RAL_TABSIZE_I350;
   3911 		break;
   3912 	default:
   3913 		size = WM_RAL_TABSIZE;
   3914 	}
   3915 
   3916 	return size;
   3917 }
   3918 
   3919 /*
   3920  * wm_set_filter:
   3921  *
   3922  *	Set up the receive filter.
   3923  */
   3924 static void
   3925 wm_set_filter(struct wm_softc *sc)
   3926 {
   3927 	struct ethercom *ec = &sc->sc_ethercom;
   3928 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3929 	struct ether_multi *enm;
   3930 	struct ether_multistep step;
   3931 	bus_addr_t mta_reg;
   3932 	uint32_t hash, reg, bit;
   3933 	int i, size, ralmax, rv;
   3934 
   3935 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3936 		device_xname(sc->sc_dev), __func__));
   3937 
   3938 	if (sc->sc_type >= WM_T_82544)
   3939 		mta_reg = WMREG_CORDOVA_MTA;
   3940 	else
   3941 		mta_reg = WMREG_MTA;
   3942 
   3943 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3944 
   3945 	if (ifp->if_flags & IFF_BROADCAST)
   3946 		sc->sc_rctl |= RCTL_BAM;
   3947 	if (ifp->if_flags & IFF_PROMISC) {
   3948 		sc->sc_rctl |= RCTL_UPE;
   3949 		ETHER_LOCK(ec);
   3950 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3951 		ETHER_UNLOCK(ec);
   3952 		goto allmulti;
   3953 	}
   3954 
   3955 	/*
   3956 	 * Set the station address in the first RAL slot, and
   3957 	 * clear the remaining slots.
   3958 	 */
   3959 	size = wm_rar_count(sc);
   3960 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3961 
   3962 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3963 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3964 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3965 		switch (i) {
   3966 		case 0:
   3967 			/* We can use all entries */
   3968 			ralmax = size;
   3969 			break;
   3970 		case 1:
   3971 			/* Only RAR[0] */
   3972 			ralmax = 1;
   3973 			break;
   3974 		default:
   3975 			/* Available SHRA + RAR[0] */
   3976 			ralmax = i + 1;
   3977 		}
   3978 	} else
   3979 		ralmax = size;
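         	/*
         	 * Clear only the writable entries; entries at or above ralmax
         	 * are locked by firmware (FWSM_WLOCK_MAC) and are left alone.
         	 */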
   3980 	for (i = 1; i < size; i++) {
   3981 		if (i < ralmax)
   3982 			wm_set_ral(sc, NULL, i);
   3983 	}
   3984 
   3985 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3986 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3987 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3988 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3989 		size = WM_ICH8_MC_TABSIZE;
   3990 	else
   3991 		size = WM_MC_TABSIZE;
   3992 	/* Clear out the multicast table. */
   3993 	for (i = 0; i < size; i++) {
   3994 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3995 		CSR_WRITE_FLUSH(sc);
   3996 	}
   3997 
   3998 	ETHER_LOCK(ec);
   3999 	ETHER_FIRST_MULTI(step, ec, enm);
   4000 	while (enm != NULL) {
   4001 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4002 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4003 			ETHER_UNLOCK(ec);
   4004 			/*
   4005 			 * We must listen to a range of multicast addresses.
   4006 			 * For now, just accept all multicasts, rather than
   4007 			 * trying to set only those filter bits needed to match
   4008 			 * the range.  (At this time, the only use of address
   4009 			 * ranges is for IP multicast routing, for which the
   4010 			 * range is big enough to require all bits set.)
   4011 			 */
   4012 			goto allmulti;
   4013 		}
   4014 
   4015 		hash = wm_mchash(sc, enm->enm_addrlo);
   4016 
   4017 		reg = (hash >> 5);
   4018 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4019 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4020 		    || (sc->sc_type == WM_T_PCH2)
   4021 		    || (sc->sc_type == WM_T_PCH_LPT)
   4022 		    || (sc->sc_type == WM_T_PCH_SPT)
   4023 		    || (sc->sc_type == WM_T_PCH_CNP))
   4024 			reg &= 0x1f;
   4025 		else
   4026 			reg &= 0x7f;
   4027 		bit = hash & 0x1f;
   4028 
   4029 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4030 		hash |= 1U << bit;
   4031 
   4032 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4033 			/*
    4034 			 * 82544 Errata 9: Certain registers cannot be written
   4035 			 * with particular alignments in PCI-X bus operation
   4036 			 * (FCAH, MTA and VFTA).
   4037 			 */
   4038 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4039 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4040 			CSR_WRITE_FLUSH(sc);
   4041 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4042 			CSR_WRITE_FLUSH(sc);
   4043 		} else {
   4044 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4045 			CSR_WRITE_FLUSH(sc);
   4046 		}
   4047 
   4048 		ETHER_NEXT_MULTI(step, enm);
   4049 	}
   4050 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4051 	ETHER_UNLOCK(ec);
   4052 
   4053 	goto setit;
   4054 
   4055  allmulti:
   4056 	sc->sc_rctl |= RCTL_MPE;
   4057 
   4058  setit:
   4059 	if (sc->sc_type >= WM_T_PCH2) {
   4060 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4061 		    && (ifp->if_mtu > ETHERMTU))
   4062 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4063 		else
   4064 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4065 		if (rv != 0)
   4066 			device_printf(sc->sc_dev,
   4067 			    "Failed to do workaround for jumbo frame.\n");
   4068 	}
   4069 
   4070 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4071 }
   4072 
   4073 /* Reset and init related */
   4074 
   4075 static void
   4076 wm_set_vlan(struct wm_softc *sc)
   4077 {
   4078 
   4079 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4080 		device_xname(sc->sc_dev), __func__));
   4081 
   4082 	/* Deal with VLAN enables. */
   4083 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4084 		sc->sc_ctrl |= CTRL_VME;
   4085 	else
   4086 		sc->sc_ctrl &= ~CTRL_VME;
   4087 
   4088 	/* Write the control registers. */
   4089 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4090 }
   4091 
   4092 static void
   4093 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4094 {
   4095 	uint32_t gcr;
   4096 	pcireg_t ctrl2;
   4097 
   4098 	gcr = CSR_READ(sc, WMREG_GCR);
   4099 
   4100 	/* Only take action if timeout value is defaulted to 0 */
   4101 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4102 		goto out;
   4103 
   4104 	if ((gcr & GCR_CAP_VER2) == 0) {
   4105 		gcr |= GCR_CMPL_TMOUT_10MS;
   4106 		goto out;
   4107 	}
   4108 
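         	/* Version-2 capable devices: set a 16ms completion timeout in DCSR2. */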
   4109 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4110 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4111 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4112 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4113 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4114 
   4115 out:
   4116 	/* Disable completion timeout resend */
   4117 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4118 
   4119 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4120 }
   4121 
   4122 void
   4123 wm_get_auto_rd_done(struct wm_softc *sc)
   4124 {
   4125 	int i;
   4126 
    4127 	/* Wait for eeprom to reload */
   4128 	switch (sc->sc_type) {
   4129 	case WM_T_82571:
   4130 	case WM_T_82572:
   4131 	case WM_T_82573:
   4132 	case WM_T_82574:
   4133 	case WM_T_82583:
   4134 	case WM_T_82575:
   4135 	case WM_T_82576:
   4136 	case WM_T_82580:
   4137 	case WM_T_I350:
   4138 	case WM_T_I354:
   4139 	case WM_T_I210:
   4140 	case WM_T_I211:
   4141 	case WM_T_80003:
   4142 	case WM_T_ICH8:
   4143 	case WM_T_ICH9:
   4144 		for (i = 0; i < 10; i++) {
   4145 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4146 				break;
   4147 			delay(1000);
   4148 		}
   4149 		if (i == 10) {
   4150 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4151 			    "complete\n", device_xname(sc->sc_dev));
   4152 		}
   4153 		break;
   4154 	default:
   4155 		break;
   4156 	}
   4157 }
   4158 
   4159 void
   4160 wm_lan_init_done(struct wm_softc *sc)
   4161 {
   4162 	uint32_t reg = 0;
   4163 	int i;
   4164 
   4165 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4166 		device_xname(sc->sc_dev), __func__));
   4167 
   4168 	/* Wait for eeprom to reload */
   4169 	switch (sc->sc_type) {
   4170 	case WM_T_ICH10:
   4171 	case WM_T_PCH:
   4172 	case WM_T_PCH2:
   4173 	case WM_T_PCH_LPT:
   4174 	case WM_T_PCH_SPT:
   4175 	case WM_T_PCH_CNP:
   4176 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4177 			reg = CSR_READ(sc, WMREG_STATUS);
   4178 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4179 				break;
   4180 			delay(100);
   4181 		}
   4182 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4183 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4184 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4185 		}
   4186 		break;
   4187 	default:
   4188 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4189 		    __func__);
   4190 		break;
   4191 	}
   4192 
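         	/* Clear the bit, presumably so the next init can be observed. */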
   4193 	reg &= ~STATUS_LAN_INIT_DONE;
   4194 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4195 }
   4196 
   4197 void
   4198 wm_get_cfg_done(struct wm_softc *sc)
   4199 {
   4200 	int mask;
   4201 	uint32_t reg;
   4202 	int i;
   4203 
   4204 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4205 		device_xname(sc->sc_dev), __func__));
   4206 
   4207 	/* Wait for eeprom to reload */
   4208 	switch (sc->sc_type) {
   4209 	case WM_T_82542_2_0:
   4210 	case WM_T_82542_2_1:
   4211 		/* null */
   4212 		break;
   4213 	case WM_T_82543:
   4214 	case WM_T_82544:
   4215 	case WM_T_82540:
   4216 	case WM_T_82545:
   4217 	case WM_T_82545_3:
   4218 	case WM_T_82546:
   4219 	case WM_T_82546_3:
   4220 	case WM_T_82541:
   4221 	case WM_T_82541_2:
   4222 	case WM_T_82547:
   4223 	case WM_T_82547_2:
   4224 	case WM_T_82573:
   4225 	case WM_T_82574:
   4226 	case WM_T_82583:
   4227 		/* generic */
   4228 		delay(10*1000);
   4229 		break;
   4230 	case WM_T_80003:
   4231 	case WM_T_82571:
   4232 	case WM_T_82572:
   4233 	case WM_T_82575:
   4234 	case WM_T_82576:
   4235 	case WM_T_82580:
   4236 	case WM_T_I350:
   4237 	case WM_T_I354:
   4238 	case WM_T_I210:
   4239 	case WM_T_I211:
   4240 		if (sc->sc_type == WM_T_82571) {
   4241 			/* Only 82571 shares port 0 */
   4242 			mask = EEMNGCTL_CFGDONE_0;
   4243 		} else
   4244 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4245 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4246 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4247 				break;
   4248 			delay(1000);
   4249 		}
   4250 		if (i >= WM_PHY_CFG_TIMEOUT)
   4251 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4252 				device_xname(sc->sc_dev), __func__));
   4253 		break;
   4254 	case WM_T_ICH8:
   4255 	case WM_T_ICH9:
   4256 	case WM_T_ICH10:
   4257 	case WM_T_PCH:
   4258 	case WM_T_PCH2:
   4259 	case WM_T_PCH_LPT:
   4260 	case WM_T_PCH_SPT:
   4261 	case WM_T_PCH_CNP:
   4262 		delay(10*1000);
   4263 		if (sc->sc_type >= WM_T_ICH10)
   4264 			wm_lan_init_done(sc);
   4265 		else
   4266 			wm_get_auto_rd_done(sc);
   4267 
   4268 		/* Clear PHY Reset Asserted bit */
   4269 		reg = CSR_READ(sc, WMREG_STATUS);
   4270 		if ((reg & STATUS_PHYRA) != 0)
   4271 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4272 		break;
   4273 	default:
   4274 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4275 		    __func__);
   4276 		break;
   4277 	}
   4278 }
   4279 
   4280 int
   4281 wm_phy_post_reset(struct wm_softc *sc)
   4282 {
   4283 	device_t dev = sc->sc_dev;
   4284 	uint16_t reg;
   4285 	int rv = 0;
   4286 
   4287 	/* This function is only for ICH8 and newer. */
   4288 	if (sc->sc_type < WM_T_ICH8)
   4289 		return 0;
   4290 
   4291 	if (wm_phy_resetisblocked(sc)) {
   4292 		/* XXX */
   4293 		device_printf(dev, "PHY is blocked\n");
   4294 		return -1;
   4295 	}
   4296 
   4297 	/* Allow time for h/w to get to quiescent state after reset */
   4298 	delay(10*1000);
   4299 
   4300 	/* Perform any necessary post-reset workarounds */
   4301 	if (sc->sc_type == WM_T_PCH)
   4302 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4303 	else if (sc->sc_type == WM_T_PCH2)
   4304 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4305 	if (rv != 0)
   4306 		return rv;
   4307 
   4308 	/* Clear the host wakeup bit after lcd reset */
   4309 	if (sc->sc_type >= WM_T_PCH) {
   4310 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4311 		reg &= ~BM_WUC_HOST_WU_BIT;
   4312 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4313 	}
   4314 
   4315 	/* Configure the LCD with the extended configuration region in NVM */
   4316 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4317 		return rv;
   4318 
   4319 	/* Configure the LCD with the OEM bits in NVM */
   4320 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4321 
   4322 	if (sc->sc_type == WM_T_PCH2) {
   4323 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4324 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4325 			delay(10 * 1000);
   4326 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4327 		}
   4328 		/* Set EEE LPI Update Timer to 200usec */
   4329 		rv = sc->phy.acquire(sc);
   4330 		if (rv)
   4331 			return rv;
   4332 		rv = wm_write_emi_reg_locked(dev,
   4333 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4334 		sc->phy.release(sc);
   4335 	}
   4336 
   4337 	return rv;
   4338 }
   4339 
   4340 /* Only for PCH and newer */
   4341 static int
   4342 wm_write_smbus_addr(struct wm_softc *sc)
   4343 {
   4344 	uint32_t strap, freq;
   4345 	uint16_t phy_data;
   4346 	int rv;
   4347 
   4348 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4349 		device_xname(sc->sc_dev), __func__));
   4350 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4351 
   4352 	strap = CSR_READ(sc, WMREG_STRAP);
   4353 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4354 
   4355 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4356 	if (rv != 0)
   4357 		return -1;
   4358 
   4359 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4360 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4361 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4362 
   4363 	if (sc->sc_phytype == WMPHY_I217) {
   4364 		/* Restore SMBus frequency */
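         		/* The strap field is one-based; zero means unsupported. */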
    4365 		if (freq--) {
   4366 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4367 			    | HV_SMB_ADDR_FREQ_HIGH);
   4368 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4369 			    HV_SMB_ADDR_FREQ_LOW);
   4370 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4371 			    HV_SMB_ADDR_FREQ_HIGH);
   4372 		} else
   4373 			DPRINTF(sc, WM_DEBUG_INIT,
   4374 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4375 				device_xname(sc->sc_dev), __func__));
   4376 	}
   4377 
   4378 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4379 	    phy_data);
   4380 }
   4381 
   4382 static int
   4383 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4384 {
   4385 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4386 	uint16_t phy_page = 0;
   4387 	int rv = 0;
   4388 
   4389 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4390 		device_xname(sc->sc_dev), __func__));
   4391 
   4392 	switch (sc->sc_type) {
   4393 	case WM_T_ICH8:
   4394 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4395 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4396 			return 0;
   4397 
   4398 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4399 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4400 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4401 			break;
   4402 		}
   4403 		/* FALLTHROUGH */
   4404 	case WM_T_PCH:
   4405 	case WM_T_PCH2:
   4406 	case WM_T_PCH_LPT:
   4407 	case WM_T_PCH_SPT:
   4408 	case WM_T_PCH_CNP:
   4409 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4410 		break;
   4411 	default:
   4412 		return 0;
   4413 	}
   4414 
   4415 	if ((rv = sc->phy.acquire(sc)) != 0)
   4416 		return rv;
   4417 
   4418 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4419 	if ((reg & sw_cfg_mask) == 0)
   4420 		goto release;
   4421 
   4422 	/*
   4423 	 * Make sure HW does not configure LCD from PHY extended configuration
   4424 	 * before SW configuration
   4425 	 */
   4426 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4427 	if ((sc->sc_type < WM_T_PCH2)
   4428 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4429 		goto release;
   4430 
   4431 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4432 		device_xname(sc->sc_dev), __func__));
    4433 	/* The pointer is in DWORDs; << 1 converts it to NVM words. */
   4434 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4435 
   4436 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4437 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4438 	if (cnf_size == 0)
   4439 		goto release;
   4440 
   4441 	if (((sc->sc_type == WM_T_PCH)
   4442 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4443 	    || (sc->sc_type > WM_T_PCH)) {
   4444 		/*
   4445 		 * HW configures the SMBus address and LEDs when the OEM and
   4446 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4447 		 * are cleared, SW will configure them instead.
   4448 		 */
   4449 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4450 			device_xname(sc->sc_dev), __func__));
   4451 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4452 			goto release;
   4453 
   4454 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4455 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4456 		    (uint16_t)reg);
   4457 		if (rv != 0)
   4458 			goto release;
   4459 	}
   4460 
   4461 	/* Configure LCD from extended configuration region. */
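         	/*
         	 * Each entry in the region is a word pair: the data to write
         	 * followed by the PHY register address it is written to; an
         	 * IGPHY_PAGE_SELECT entry updates phy_page for later entries.
         	 */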
   4462 	for (i = 0; i < cnf_size; i++) {
   4463 		uint16_t reg_data, reg_addr;
   4464 
   4465 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4466 			goto release;
   4467 
   4468 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4469 			goto release;
   4470 
   4471 		if (reg_addr == IGPHY_PAGE_SELECT)
   4472 			phy_page = reg_data;
   4473 
   4474 		reg_addr &= IGPHY_MAXREGADDR;
   4475 		reg_addr |= phy_page;
   4476 
   4477 		KASSERT(sc->phy.writereg_locked != NULL);
   4478 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4479 		    reg_data);
   4480 	}
   4481 
   4482 release:
   4483 	sc->phy.release(sc);
   4484 	return rv;
   4485 }
   4486 
   4487 /*
   4488  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4489  *  @sc:       pointer to the HW structure
    4490  *  @d0_state: true if the device is entering D0, false for D3
   4491  *
   4492  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4493  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4494  *  in the NVM determine whether HW should configure LPLU and Gbe Disable.
   4495  */
   4496 int
   4497 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4498 {
   4499 	uint32_t mac_reg;
   4500 	uint16_t oem_reg;
   4501 	int rv;
   4502 
   4503 	if (sc->sc_type < WM_T_PCH)
   4504 		return 0;
   4505 
   4506 	rv = sc->phy.acquire(sc);
   4507 	if (rv != 0)
   4508 		return rv;
   4509 
   4510 	if (sc->sc_type == WM_T_PCH) {
   4511 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4512 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4513 			goto release;
   4514 	}
   4515 
   4516 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4517 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4518 		goto release;
   4519 
   4520 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4521 
   4522 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4523 	if (rv != 0)
   4524 		goto release;
   4525 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4526 
   4527 	if (d0_state) {
   4528 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4529 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4530 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4531 			oem_reg |= HV_OEM_BITS_LPLU;
   4532 	} else {
   4533 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4534 		    != 0)
   4535 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4536 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4537 		    != 0)
   4538 			oem_reg |= HV_OEM_BITS_LPLU;
   4539 	}
   4540 
   4541 	/* Set Restart auto-neg to activate the bits */
   4542 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4543 	    && (wm_phy_resetisblocked(sc) == false))
   4544 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4545 
   4546 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4547 
   4548 release:
   4549 	sc->phy.release(sc);
   4550 
   4551 	return rv;
   4552 }
   4553 
   4554 /* Init hardware bits */
   4555 void
   4556 wm_initialize_hardware_bits(struct wm_softc *sc)
   4557 {
   4558 	uint32_t tarc0, tarc1, reg;
   4559 
   4560 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4561 		device_xname(sc->sc_dev), __func__));
   4562 
   4563 	/* For 82571 variant, 80003 and ICHs */
   4564 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4565 	    || (sc->sc_type >= WM_T_80003)) {
   4566 
   4567 		/* Transmit Descriptor Control 0 */
   4568 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4569 		reg |= TXDCTL_COUNT_DESC;
   4570 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4571 
   4572 		/* Transmit Descriptor Control 1 */
   4573 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4574 		reg |= TXDCTL_COUNT_DESC;
   4575 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4576 
   4577 		/* TARC0 */
   4578 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4579 		switch (sc->sc_type) {
   4580 		case WM_T_82571:
   4581 		case WM_T_82572:
   4582 		case WM_T_82573:
   4583 		case WM_T_82574:
   4584 		case WM_T_82583:
   4585 		case WM_T_80003:
   4586 			/* Clear bits 30..27 */
   4587 			tarc0 &= ~__BITS(30, 27);
   4588 			break;
   4589 		default:
   4590 			break;
   4591 		}
   4592 
   4593 		switch (sc->sc_type) {
   4594 		case WM_T_82571:
   4595 		case WM_T_82572:
   4596 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4597 
   4598 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4599 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4600 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4601 			/* 8257[12] Errata No.7 */
    4602 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4603 
   4604 			/* TARC1 bit 28 */
   4605 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4606 				tarc1 &= ~__BIT(28);
   4607 			else
   4608 				tarc1 |= __BIT(28);
   4609 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4610 
   4611 			/*
   4612 			 * 8257[12] Errata No.13
    4613 			 * Disable Dynamic Clock Gating.
   4614 			 */
   4615 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4616 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4617 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4618 			break;
   4619 		case WM_T_82573:
   4620 		case WM_T_82574:
   4621 		case WM_T_82583:
   4622 			if ((sc->sc_type == WM_T_82574)
   4623 			    || (sc->sc_type == WM_T_82583))
   4624 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4625 
   4626 			/* Extended Device Control */
   4627 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4628 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4629 			reg |= __BIT(22);	/* Set bit 22 */
   4630 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4631 
   4632 			/* Device Control */
   4633 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4634 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4635 
   4636 			/* PCIe Control Register */
   4637 			/*
   4638 			 * 82573 Errata (unknown).
   4639 			 *
   4640 			 * 82574 Errata 25 and 82583 Errata 12
   4641 			 * "Dropped Rx Packets":
    4642 			 *   NVM images 2.1.4 and newer do not have this bug.
   4643 			 */
   4644 			reg = CSR_READ(sc, WMREG_GCR);
   4645 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4646 			CSR_WRITE(sc, WMREG_GCR, reg);
   4647 
   4648 			if ((sc->sc_type == WM_T_82574)
   4649 			    || (sc->sc_type == WM_T_82583)) {
   4650 				/*
   4651 				 * Document says this bit must be set for
   4652 				 * proper operation.
   4653 				 */
   4654 				reg = CSR_READ(sc, WMREG_GCR);
   4655 				reg |= __BIT(22);
   4656 				CSR_WRITE(sc, WMREG_GCR, reg);
   4657 
   4658 				/*
    4659 				 * Apply a workaround for a hardware erratum
    4660 				 * documented in the errata sheets. It fixes
    4661 				 * an issue where unreliable PCIe completions
    4662 				 * can occur, particularly with ASPM enabled.
    4663 				 * Without the fix, the issue can cause Tx
    4664 				 * timeouts.
   4665 				 */
   4666 				reg = CSR_READ(sc, WMREG_GCR2);
   4667 				reg |= __BIT(0);
   4668 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4669 			}
   4670 			break;
   4671 		case WM_T_80003:
   4672 			/* TARC0 */
   4673 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4674 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4675 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4676 
   4677 			/* TARC1 bit 28 */
   4678 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4679 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4680 				tarc1 &= ~__BIT(28);
   4681 			else
   4682 				tarc1 |= __BIT(28);
   4683 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4684 			break;
   4685 		case WM_T_ICH8:
   4686 		case WM_T_ICH9:
   4687 		case WM_T_ICH10:
   4688 		case WM_T_PCH:
   4689 		case WM_T_PCH2:
   4690 		case WM_T_PCH_LPT:
   4691 		case WM_T_PCH_SPT:
   4692 		case WM_T_PCH_CNP:
   4693 			/* TARC0 */
   4694 			if (sc->sc_type == WM_T_ICH8) {
   4695 				/* Set TARC0 bits 29 and 28 */
   4696 				tarc0 |= __BITS(29, 28);
   4697 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4698 				tarc0 |= __BIT(29);
   4699 				/*
   4700 				 *  Drop bit 28. From Linux.
   4701 				 * See I218/I219 spec update
   4702 				 * "5. Buffer Overrun While the I219 is
   4703 				 * Processing DMA Transactions"
   4704 				 */
   4705 				tarc0 &= ~__BIT(28);
   4706 			}
   4707 			/* Set TARC0 bits 23,24,26,27 */
   4708 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4709 
   4710 			/* CTRL_EXT */
   4711 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4712 			reg |= __BIT(22);	/* Set bit 22 */
   4713 			/*
   4714 			 * Enable PHY low-power state when MAC is at D3
   4715 			 * w/o WoL
   4716 			 */
   4717 			if (sc->sc_type >= WM_T_PCH)
   4718 				reg |= CTRL_EXT_PHYPDEN;
   4719 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4720 
   4721 			/* TARC1 */
   4722 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4723 			/* bit 28 */
   4724 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4725 				tarc1 &= ~__BIT(28);
   4726 			else
   4727 				tarc1 |= __BIT(28);
   4728 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4729 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4730 
   4731 			/* Device Status */
   4732 			if (sc->sc_type == WM_T_ICH8) {
   4733 				reg = CSR_READ(sc, WMREG_STATUS);
   4734 				reg &= ~__BIT(31);
   4735 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4736 
   4737 			}
   4738 
   4739 			/* IOSFPC */
   4740 			if (sc->sc_type == WM_T_PCH_SPT) {
   4741 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4742 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4743 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4744 			}
   4745 			/*
    4746 			 * To work around a descriptor data corruption issue
    4747 			 * during NFS v2 UDP traffic, just disable the NFS
    4748 			 * filtering capability.
   4749 			 */
   4750 			reg = CSR_READ(sc, WMREG_RFCTL);
   4751 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4752 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4753 			break;
   4754 		default:
   4755 			break;
   4756 		}
   4757 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4758 
   4759 		switch (sc->sc_type) {
   4760 		/*
   4761 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4762 		 * Avoid RSS Hash Value bug.
   4763 		 */
   4764 		case WM_T_82571:
   4765 		case WM_T_82572:
   4766 		case WM_T_82573:
   4767 		case WM_T_80003:
   4768 		case WM_T_ICH8:
   4769 			reg = CSR_READ(sc, WMREG_RFCTL);
    4770 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4771 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4772 			break;
   4773 		case WM_T_82574:
    4774 			/* Use extended Rx descriptors. */
   4775 			reg = CSR_READ(sc, WMREG_RFCTL);
   4776 			reg |= WMREG_RFCTL_EXSTEN;
   4777 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4778 			break;
   4779 		default:
   4780 			break;
   4781 		}
   4782 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4783 		/*
   4784 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4785 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4786 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4787 		 * Correctly by the Device"
   4788 		 *
   4789 		 * I354(C2000) Errata AVR53:
   4790 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4791 		 * Hang"
   4792 		 */
   4793 		reg = CSR_READ(sc, WMREG_RFCTL);
   4794 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4795 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4796 	}
   4797 }
   4798 
   4799 static uint32_t
   4800 wm_rxpbs_adjust_82580(uint32_t val)
   4801 {
   4802 	uint32_t rv = 0;
   4803 
   4804 	if (val < __arraycount(wm_82580_rxpbs_table))
   4805 		rv = wm_82580_rxpbs_table[val];
   4806 
   4807 	return rv;
   4808 }
   4809 
   4810 /*
   4811  * wm_reset_phy:
   4812  *
   4813  *	generic PHY reset function.
   4814  *	Same as e1000_phy_hw_reset_generic()
   4815  */
   4816 static int
   4817 wm_reset_phy(struct wm_softc *sc)
   4818 {
   4819 	uint32_t reg;
   4820 
   4821 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4822 		device_xname(sc->sc_dev), __func__));
   4823 	if (wm_phy_resetisblocked(sc))
   4824 		return -1;
   4825 
   4826 	sc->phy.acquire(sc);
   4827 
   4828 	reg = CSR_READ(sc, WMREG_CTRL);
   4829 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4830 	CSR_WRITE_FLUSH(sc);
   4831 
   4832 	delay(sc->phy.reset_delay_us);
   4833 
   4834 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4835 	CSR_WRITE_FLUSH(sc);
   4836 
   4837 	delay(150);
   4838 
   4839 	sc->phy.release(sc);
   4840 
   4841 	wm_get_cfg_done(sc);
   4842 	wm_phy_post_reset(sc);
   4843 
   4844 	return 0;
   4845 }
   4846 
   4847 /*
   4848  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4849  * so it is enough to check sc->sc_queue[0] only.
   4850  */
   4851 static void
   4852 wm_flush_desc_rings(struct wm_softc *sc)
   4853 {
   4854 	pcireg_t preg;
   4855 	uint32_t reg;
   4856 	struct wm_txqueue *txq;
   4857 	wiseman_txdesc_t *txd;
   4858 	int nexttx;
   4859 	uint32_t rctl;
   4860 
   4861 	/* First, disable MULR fix in FEXTNVM11 */
   4862 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4863 	reg |= FEXTNVM11_DIS_MULRFIX;
   4864 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4865 
   4866 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4867 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4868 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4869 		return;
   4870 
   4871 	/* TX */
   4872 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4873 	    preg, reg);
   4874 	reg = CSR_READ(sc, WMREG_TCTL);
   4875 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4876 
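         	/*
         	 * Queue a single dummy 512-byte descriptor and advance TDT so
         	 * that the hardware has something to DMA and can complete the
         	 * pending flush request.
         	 */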
   4877 	txq = &sc->sc_queue[0].wmq_txq;
   4878 	nexttx = txq->txq_next;
   4879 	txd = &txq->txq_descs[nexttx];
   4880 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4881 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4882 	txd->wtx_fields.wtxu_status = 0;
   4883 	txd->wtx_fields.wtxu_options = 0;
   4884 	txd->wtx_fields.wtxu_vlan = 0;
   4885 
   4886 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4887 	    BUS_SPACE_BARRIER_WRITE);
   4888 
   4889 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4890 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4891 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4892 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4893 	delay(250);
   4894 
   4895 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4896 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4897 		return;
   4898 
   4899 	/* RX */
   4900 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4901 	rctl = CSR_READ(sc, WMREG_RCTL);
   4902 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4903 	CSR_WRITE_FLUSH(sc);
   4904 	delay(150);
   4905 
   4906 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4907 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4908 	reg &= 0xffffc000;
   4909 	/*
   4910 	 * Update thresholds: prefetch threshold to 31, host threshold
   4911 	 * to 1 and make sure the granularity is "descriptors" and not
   4912 	 * "cache lines"
   4913 	 */
   4914 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4915 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4916 
   4917 	/* Momentarily enable the RX ring for the changes to take effect */
   4918 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4919 	CSR_WRITE_FLUSH(sc);
   4920 	delay(150);
   4921 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4922 }
   4923 
   4924 /*
   4925  * wm_reset:
   4926  *
   4927  *	Reset the i82542 chip.
   4928  */
   4929 static void
   4930 wm_reset(struct wm_softc *sc)
   4931 {
   4932 	int phy_reset = 0;
   4933 	int i, error = 0;
   4934 	uint32_t reg;
   4935 	uint16_t kmreg;
   4936 	int rv;
   4937 
   4938 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4939 		device_xname(sc->sc_dev), __func__));
   4940 	KASSERT(sc->sc_type != 0);
   4941 
   4942 	/*
   4943 	 * Allocate on-chip memory according to the MTU size.
   4944 	 * The Packet Buffer Allocation register must be written
   4945 	 * before the chip is reset.
   4946 	 */
   4947 	switch (sc->sc_type) {
   4948 	case WM_T_82547:
   4949 	case WM_T_82547_2:
   4950 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4951 		    PBA_22K : PBA_30K;
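         		/*
         		 * The rest of the 40KB packet buffer becomes the Tx
         		 * FIFO below; e.g. PBA_30K for Rx leaves 10KB for Tx.
         		 */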
   4952 		for (i = 0; i < sc->sc_nqueues; i++) {
   4953 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4954 			txq->txq_fifo_head = 0;
   4955 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4956 			txq->txq_fifo_size =
   4957 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4958 			txq->txq_fifo_stall = 0;
   4959 		}
   4960 		break;
   4961 	case WM_T_82571:
   4962 	case WM_T_82572:
   4963 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4964 	case WM_T_80003:
   4965 		sc->sc_pba = PBA_32K;
   4966 		break;
   4967 	case WM_T_82573:
   4968 		sc->sc_pba = PBA_12K;
   4969 		break;
   4970 	case WM_T_82574:
   4971 	case WM_T_82583:
   4972 		sc->sc_pba = PBA_20K;
   4973 		break;
   4974 	case WM_T_82576:
   4975 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4976 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4977 		break;
   4978 	case WM_T_82580:
   4979 	case WM_T_I350:
   4980 	case WM_T_I354:
   4981 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4982 		break;
   4983 	case WM_T_I210:
   4984 	case WM_T_I211:
   4985 		sc->sc_pba = PBA_34K;
   4986 		break;
   4987 	case WM_T_ICH8:
   4988 		/* Workaround for a bit corruption issue in FIFO memory */
   4989 		sc->sc_pba = PBA_8K;
   4990 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4991 		break;
   4992 	case WM_T_ICH9:
   4993 	case WM_T_ICH10:
   4994 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4995 		    PBA_14K : PBA_10K;
   4996 		break;
   4997 	case WM_T_PCH:
   4998 	case WM_T_PCH2:	/* XXX 14K? */
   4999 	case WM_T_PCH_LPT:
   5000 	case WM_T_PCH_SPT:
   5001 	case WM_T_PCH_CNP:
   5002 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5003 		    PBA_12K : PBA_26K;
   5004 		break;
   5005 	default:
   5006 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5007 		    PBA_40K : PBA_48K;
   5008 		break;
   5009 	}
   5010 	/*
   5011 	 * Only old or non-multiqueue devices have the PBA register
   5012 	 * XXX Need special handling for 82575.
   5013 	 */
   5014 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5015 	    || (sc->sc_type == WM_T_82575))
   5016 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5017 
   5018 	/* Prevent the PCI-E bus from sticking */
   5019 	if (sc->sc_flags & WM_F_PCIE) {
   5020 		int timeout = 800;
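         		/* Up to 800 * 100us = 80ms in the loop below. */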
   5021 
   5022 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5023 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5024 
   5025 		while (timeout--) {
   5026 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5027 			    == 0)
   5028 				break;
   5029 			delay(100);
   5030 		}
   5031 		if (timeout == 0)
   5032 			device_printf(sc->sc_dev,
   5033 			    "failed to disable busmastering\n");
   5034 	}
   5035 
   5036 	/* Set the completion timeout for interface */
   5037 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5038 	    || (sc->sc_type == WM_T_82580)
   5039 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5040 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5041 		wm_set_pcie_completion_timeout(sc);
   5042 
   5043 	/* Clear interrupt */
   5044 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5045 	if (wm_is_using_msix(sc)) {
   5046 		if (sc->sc_type != WM_T_82574) {
   5047 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5048 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5049 		} else
   5050 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5051 	}
   5052 
   5053 	/* Stop the transmit and receive processes. */
   5054 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5055 	sc->sc_rctl &= ~RCTL_EN;
   5056 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5057 	CSR_WRITE_FLUSH(sc);
   5058 
   5059 	/* XXX set_tbi_sbp_82543() */
   5060 
   5061 	delay(10*1000);
   5062 
   5063 	/* Must acquire the MDIO ownership before MAC reset */
   5064 	switch (sc->sc_type) {
   5065 	case WM_T_82573:
   5066 	case WM_T_82574:
   5067 	case WM_T_82583:
   5068 		error = wm_get_hw_semaphore_82573(sc);
   5069 		break;
   5070 	default:
   5071 		break;
   5072 	}
   5073 
   5074 	/*
   5075 	 * 82541 Errata 29? & 82547 Errata 28?
   5076 	 * See also the description about PHY_RST bit in CTRL register
   5077 	 * in 8254x_GBe_SDM.pdf.
   5078 	 */
   5079 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5080 		CSR_WRITE(sc, WMREG_CTRL,
   5081 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5082 		CSR_WRITE_FLUSH(sc);
   5083 		delay(5000);
   5084 	}
   5085 
   5086 	switch (sc->sc_type) {
   5087 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5088 	case WM_T_82541:
   5089 	case WM_T_82541_2:
   5090 	case WM_T_82547:
   5091 	case WM_T_82547_2:
   5092 		/*
   5093 		 * On some chipsets, a reset through a memory-mapped write
   5094 		 * cycle can cause the chip to reset before completing the
   5095 		 * write cycle. This causes major headache that can be avoided
   5096 		 * by issuing the reset via indirect register writes through
   5097 		 * I/O space.
   5098 		 *
   5099 		 * So, if we successfully mapped the I/O BAR at attach time,
   5100 		 * use that. Otherwise, try our luck with a memory-mapped
   5101 		 * reset.
   5102 		 */
   5103 		if (sc->sc_flags & WM_F_IOH_VALID)
   5104 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5105 		else
   5106 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5107 		break;
   5108 	case WM_T_82545_3:
   5109 	case WM_T_82546_3:
   5110 		/* Use the shadow control register on these chips. */
   5111 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5112 		break;
   5113 	case WM_T_80003:
   5114 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5115 		sc->phy.acquire(sc);
   5116 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5117 		sc->phy.release(sc);
   5118 		break;
   5119 	case WM_T_ICH8:
   5120 	case WM_T_ICH9:
   5121 	case WM_T_ICH10:
   5122 	case WM_T_PCH:
   5123 	case WM_T_PCH2:
   5124 	case WM_T_PCH_LPT:
   5125 	case WM_T_PCH_SPT:
   5126 	case WM_T_PCH_CNP:
   5127 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5128 		if (wm_phy_resetisblocked(sc) == false) {
   5129 			/*
   5130 			 * Gate automatic PHY configuration by hardware on
   5131 			 * non-managed 82579
   5132 			 */
   5133 			if ((sc->sc_type == WM_T_PCH2)
   5134 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5135 				== 0))
   5136 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5137 
   5138 			reg |= CTRL_PHY_RESET;
   5139 			phy_reset = 1;
   5140 		} else
   5141 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5142 		sc->phy.acquire(sc);
   5143 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5144 		/* Don't insert a completion barrier when reset */
   5145 		delay(20*1000);
   5146 		mutex_exit(sc->sc_ich_phymtx);
   5147 		break;
   5148 	case WM_T_82580:
   5149 	case WM_T_I350:
   5150 	case WM_T_I354:
   5151 	case WM_T_I210:
   5152 	case WM_T_I211:
   5153 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5154 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5155 			CSR_WRITE_FLUSH(sc);
   5156 		delay(5000);
   5157 		break;
   5158 	case WM_T_82542_2_0:
   5159 	case WM_T_82542_2_1:
   5160 	case WM_T_82543:
   5161 	case WM_T_82540:
   5162 	case WM_T_82545:
   5163 	case WM_T_82546:
   5164 	case WM_T_82571:
   5165 	case WM_T_82572:
   5166 	case WM_T_82573:
   5167 	case WM_T_82574:
   5168 	case WM_T_82575:
   5169 	case WM_T_82576:
   5170 	case WM_T_82583:
   5171 	default:
   5172 		/* Everything else can safely use the documented method. */
   5173 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5174 		break;
   5175 	}
   5176 
   5177 	/* Must release the MDIO ownership after MAC reset */
   5178 	switch (sc->sc_type) {
   5179 	case WM_T_82573:
   5180 	case WM_T_82574:
   5181 	case WM_T_82583:
   5182 		if (error == 0)
   5183 			wm_put_hw_semaphore_82573(sc);
   5184 		break;
   5185 	default:
   5186 		break;
   5187 	}
   5188 
   5189 	/* Set Phy Config Counter to 50msec */
   5190 	if (sc->sc_type == WM_T_PCH2) {
   5191 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5192 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5193 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5194 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5195 	}
   5196 
   5197 	if (phy_reset != 0)
   5198 		wm_get_cfg_done(sc);
   5199 
   5200 	/* Reload EEPROM */
   5201 	switch (sc->sc_type) {
   5202 	case WM_T_82542_2_0:
   5203 	case WM_T_82542_2_1:
   5204 	case WM_T_82543:
   5205 	case WM_T_82544:
   5206 		delay(10);
   5207 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5208 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5209 		CSR_WRITE_FLUSH(sc);
   5210 		delay(2000);
   5211 		break;
   5212 	case WM_T_82540:
   5213 	case WM_T_82545:
   5214 	case WM_T_82545_3:
   5215 	case WM_T_82546:
   5216 	case WM_T_82546_3:
   5217 		delay(5*1000);
   5218 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5219 		break;
   5220 	case WM_T_82541:
   5221 	case WM_T_82541_2:
   5222 	case WM_T_82547:
   5223 	case WM_T_82547_2:
   5224 		delay(20000);
   5225 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5226 		break;
   5227 	case WM_T_82571:
   5228 	case WM_T_82572:
   5229 	case WM_T_82573:
   5230 	case WM_T_82574:
   5231 	case WM_T_82583:
   5232 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5233 			delay(10);
   5234 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5235 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5236 			CSR_WRITE_FLUSH(sc);
   5237 		}
   5238 		/* check EECD_EE_AUTORD */
   5239 		wm_get_auto_rd_done(sc);
   5240 		/*
   5241 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5242 		 * is set.
   5243 		 */
   5244 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5245 		    || (sc->sc_type == WM_T_82583))
   5246 			delay(25*1000);
   5247 		break;
   5248 	case WM_T_82575:
   5249 	case WM_T_82576:
   5250 	case WM_T_82580:
   5251 	case WM_T_I350:
   5252 	case WM_T_I354:
   5253 	case WM_T_I210:
   5254 	case WM_T_I211:
   5255 	case WM_T_80003:
   5256 		/* check EECD_EE_AUTORD */
   5257 		wm_get_auto_rd_done(sc);
   5258 		break;
   5259 	case WM_T_ICH8:
   5260 	case WM_T_ICH9:
   5261 	case WM_T_ICH10:
   5262 	case WM_T_PCH:
   5263 	case WM_T_PCH2:
   5264 	case WM_T_PCH_LPT:
   5265 	case WM_T_PCH_SPT:
   5266 	case WM_T_PCH_CNP:
   5267 		break;
   5268 	default:
   5269 		panic("%s: unknown type\n", __func__);
   5270 	}
   5271 
   5272 	/* Check whether EEPROM is present or not */
   5273 	switch (sc->sc_type) {
   5274 	case WM_T_82575:
   5275 	case WM_T_82576:
   5276 	case WM_T_82580:
   5277 	case WM_T_I350:
   5278 	case WM_T_I354:
   5279 	case WM_T_ICH8:
   5280 	case WM_T_ICH9:
   5281 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5282 			/* Not found */
   5283 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5284 			if (sc->sc_type == WM_T_82575)
   5285 				wm_reset_init_script_82575(sc);
   5286 		}
   5287 		break;
   5288 	default:
   5289 		break;
   5290 	}
   5291 
   5292 	if (phy_reset != 0)
   5293 		wm_phy_post_reset(sc);
   5294 
   5295 	if ((sc->sc_type == WM_T_82580)
   5296 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5297 		/* Clear global device reset status bit */
   5298 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5299 	}
   5300 
   5301 	/* Clear any pending interrupt events. */
   5302 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5303 	reg = CSR_READ(sc, WMREG_ICR);
   5304 	if (wm_is_using_msix(sc)) {
   5305 		if (sc->sc_type != WM_T_82574) {
   5306 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5307 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5308 		} else
   5309 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5310 	}
   5311 
   5312 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5313 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5314 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5315 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5316 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5317 		reg |= KABGTXD_BGSQLBIAS;
   5318 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5319 	}
   5320 
   5321 	/* Reload sc_ctrl */
   5322 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5323 
   5324 	wm_set_eee(sc);
   5325 
   5326 	/*
   5327 	 * For PCH, this write will make sure that any noise will be detected
   5328 	 * as a CRC error and be dropped rather than show up as a bad packet
   5329 	 * to the DMA engine
   5330 	 */
   5331 	if (sc->sc_type == WM_T_PCH)
   5332 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5333 
   5334 	if (sc->sc_type >= WM_T_82544)
   5335 		CSR_WRITE(sc, WMREG_WUC, 0);
   5336 
   5337 	if (sc->sc_type < WM_T_82575)
   5338 		wm_disable_aspm(sc); /* Workaround for some chips */
   5339 
   5340 	wm_reset_mdicnfg_82580(sc);
   5341 
   5342 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5343 		wm_pll_workaround_i210(sc);
   5344 
   5345 	if (sc->sc_type == WM_T_80003) {
   5346 		/* Default to TRUE to enable the MDIC W/A */
   5347 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5348 
   5349 		rv = wm_kmrn_readreg(sc,
   5350 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5351 		if (rv == 0) {
   5352 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5353 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5354 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5355 			else
   5356 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5357 		}
   5358 	}
   5359 }
   5360 
   5361 /*
   5362  * wm_add_rxbuf:
   5363  *
    5364  *	Add a receive buffer to the indicated descriptor.
   5365  */
   5366 static int
   5367 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5368 {
   5369 	struct wm_softc *sc = rxq->rxq_sc;
   5370 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5371 	struct mbuf *m;
   5372 	int error;
   5373 
   5374 	KASSERT(mutex_owned(rxq->rxq_lock));
   5375 
   5376 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5377 	if (m == NULL)
   5378 		return ENOBUFS;
   5379 
   5380 	MCLGET(m, M_DONTWAIT);
   5381 	if ((m->m_flags & M_EXT) == 0) {
   5382 		m_freem(m);
   5383 		return ENOBUFS;
   5384 	}
   5385 
   5386 	if (rxs->rxs_mbuf != NULL)
   5387 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5388 
   5389 	rxs->rxs_mbuf = m;
   5390 
   5391 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5392 	/*
   5393 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5394 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5395 	 */
   5396 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5397 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5398 	if (error) {
   5399 		/* XXX XXX XXX */
   5400 		aprint_error_dev(sc->sc_dev,
   5401 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5402 		panic("wm_add_rxbuf");
   5403 	}
   5404 
   5405 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5406 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5407 
   5408 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5409 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5410 			wm_init_rxdesc(rxq, idx);
   5411 	} else
   5412 		wm_init_rxdesc(rxq, idx);
   5413 
   5414 	return 0;
   5415 }
   5416 
   5417 /*
   5418  * wm_rxdrain:
   5419  *
   5420  *	Drain the receive queue.
   5421  */
   5422 static void
   5423 wm_rxdrain(struct wm_rxqueue *rxq)
   5424 {
   5425 	struct wm_softc *sc = rxq->rxq_sc;
   5426 	struct wm_rxsoft *rxs;
   5427 	int i;
   5428 
   5429 	KASSERT(mutex_owned(rxq->rxq_lock));
   5430 
   5431 	for (i = 0; i < WM_NRXDESC; i++) {
   5432 		rxs = &rxq->rxq_soft[i];
   5433 		if (rxs->rxs_mbuf != NULL) {
   5434 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5435 			m_freem(rxs->rxs_mbuf);
   5436 			rxs->rxs_mbuf = NULL;
   5437 		}
   5438 	}
   5439 }
   5440 
   5441 /*
   5442  * Setup registers for RSS.
   5443  *
   5444  * XXX not yet VMDq support
   5445  */
   5446 static void
   5447 wm_init_rss(struct wm_softc *sc)
   5448 {
   5449 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5450 	int i;
   5451 
   5452 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5453 
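         	/*
         	 * Fill the redirection table round-robin: entry i points at
         	 * queue (i % sc_nqueues), so with e.g. four queues the
         	 * entries repeat the pattern 0, 1, 2, 3.
         	 */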
   5454 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5455 		unsigned int qid, reta_ent;
   5456 
   5457 		qid  = i % sc->sc_nqueues;
   5458 		switch (sc->sc_type) {
   5459 		case WM_T_82574:
   5460 			reta_ent = __SHIFTIN(qid,
   5461 			    RETA_ENT_QINDEX_MASK_82574);
   5462 			break;
   5463 		case WM_T_82575:
   5464 			reta_ent = __SHIFTIN(qid,
   5465 			    RETA_ENT_QINDEX1_MASK_82575);
   5466 			break;
   5467 		default:
   5468 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5469 			break;
   5470 		}
   5471 
   5472 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5473 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5474 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5475 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5476 	}
   5477 
   5478 	rss_getkey((uint8_t *)rss_key);
   5479 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5480 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5481 
   5482 	if (sc->sc_type == WM_T_82574)
   5483 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5484 	else
   5485 		mrqc = MRQC_ENABLE_RSS_MQ;
   5486 
   5487 	/*
   5488 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5489 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5490 	 */
   5491 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5492 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5493 #if 0
   5494 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5495 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5496 #endif
   5497 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5498 
   5499 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5500 }
   5501 
   5502 /*
    5503  * Adjust the TX and RX queue numbers which the system actually uses.
    5504  *
    5505  * The numbers are affected by the following parameters:
    5506  *     - The number of hardware queues
   5507  *     - The number of MSI-X vectors (= "nvectors" argument)
   5508  *     - ncpu
   5509  */
   5510 static void
   5511 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5512 {
   5513 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5514 
   5515 	if (nvectors < 2) {
   5516 		sc->sc_nqueues = 1;
   5517 		return;
   5518 	}
   5519 
   5520 	switch (sc->sc_type) {
   5521 	case WM_T_82572:
   5522 		hw_ntxqueues = 2;
   5523 		hw_nrxqueues = 2;
   5524 		break;
   5525 	case WM_T_82574:
   5526 		hw_ntxqueues = 2;
   5527 		hw_nrxqueues = 2;
   5528 		break;
   5529 	case WM_T_82575:
   5530 		hw_ntxqueues = 4;
   5531 		hw_nrxqueues = 4;
   5532 		break;
   5533 	case WM_T_82576:
   5534 		hw_ntxqueues = 16;
   5535 		hw_nrxqueues = 16;
   5536 		break;
   5537 	case WM_T_82580:
   5538 	case WM_T_I350:
   5539 	case WM_T_I354:
   5540 		hw_ntxqueues = 8;
   5541 		hw_nrxqueues = 8;
   5542 		break;
   5543 	case WM_T_I210:
   5544 		hw_ntxqueues = 4;
   5545 		hw_nrxqueues = 4;
   5546 		break;
   5547 	case WM_T_I211:
   5548 		hw_ntxqueues = 2;
   5549 		hw_nrxqueues = 2;
   5550 		break;
   5551 		/*
    5552 		 * As the following Ethernet controllers do not support
    5553 		 * MSI-X, this driver does not use multiqueue on them:
   5554 		 *     - WM_T_80003
   5555 		 *     - WM_T_ICH8
   5556 		 *     - WM_T_ICH9
   5557 		 *     - WM_T_ICH10
   5558 		 *     - WM_T_PCH
   5559 		 *     - WM_T_PCH2
   5560 		 *     - WM_T_PCH_LPT
   5561 		 */
   5562 	default:
   5563 		hw_ntxqueues = 1;
   5564 		hw_nrxqueues = 1;
   5565 		break;
   5566 	}
   5567 
   5568 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5569 
   5570 	/*
    5571 	 * As more queues than MSI-X vectors cannot improve scaling, we
    5572 	 * limit the number of queues actually used.
   5573 	 */
   5574 	if (nvectors < hw_nqueues + 1)
   5575 		sc->sc_nqueues = nvectors - 1;
   5576 	else
   5577 		sc->sc_nqueues = hw_nqueues;
   5578 
   5579 	/*
    5580 	 * As more queues than CPUs cannot improve scaling, we limit
    5581 	 * the number of queues actually used.
   5582 	 */
   5583 	if (ncpu < sc->sc_nqueues)
   5584 		sc->sc_nqueues = ncpu;
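         	/*
         	 * E.g. (hypothetical configuration) an 82576 (16 hardware
         	 * queues) with 5 MSI-X vectors on an 8-CPU machine ends up
         	 * with sc_nqueues = min(16, 5 - 1, 8) = 4.
         	 */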
   5585 }
   5586 
   5587 static inline bool
   5588 wm_is_using_msix(struct wm_softc *sc)
   5589 {
   5590 
   5591 	return (sc->sc_nintrs > 1);
   5592 }
   5593 
   5594 static inline bool
   5595 wm_is_using_multiqueue(struct wm_softc *sc)
   5596 {
   5597 
   5598 	return (sc->sc_nqueues > 1);
   5599 }
   5600 
   5601 static int
   5602 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5603 {
   5604 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5605 
   5606 	wmq->wmq_id = qidx;
   5607 	wmq->wmq_intr_idx = intr_idx;
   5608 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5609 	    wm_handle_queue, wmq);
   5610 	if (wmq->wmq_si != NULL)
   5611 		return 0;
   5612 
   5613 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5614 	    wmq->wmq_id);
   5615 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5616 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5617 	return ENOMEM;
   5618 }
   5619 
   5620 /*
   5621  * Both single interrupt MSI and INTx can use this function.
   5622  */
   5623 static int
   5624 wm_setup_legacy(struct wm_softc *sc)
   5625 {
   5626 	pci_chipset_tag_t pc = sc->sc_pc;
   5627 	const char *intrstr = NULL;
   5628 	char intrbuf[PCI_INTRSTR_LEN];
   5629 	int error;
   5630 
   5631 	error = wm_alloc_txrx_queues(sc);
   5632 	if (error) {
   5633 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5634 		    error);
   5635 		return ENOMEM;
   5636 	}
   5637 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5638 	    sizeof(intrbuf));
   5639 #ifdef WM_MPSAFE
   5640 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5641 #endif
   5642 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5643 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5644 	if (sc->sc_ihs[0] == NULL) {
   5645 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5646 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5647 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5648 		return ENOMEM;
   5649 	}
   5650 
   5651 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5652 	sc->sc_nintrs = 1;
   5653 
   5654 	return wm_softint_establish_queue(sc, 0, 0);
   5655 }
   5656 
   5657 static int
   5658 wm_setup_msix(struct wm_softc *sc)
   5659 {
   5660 	void *vih;
   5661 	kcpuset_t *affinity;
   5662 	int qidx, error, intr_idx, txrx_established;
   5663 	pci_chipset_tag_t pc = sc->sc_pc;
   5664 	const char *intrstr = NULL;
   5665 	char intrbuf[PCI_INTRSTR_LEN];
   5666 	char intr_xname[INTRDEVNAMEBUF];
   5667 
   5668 	if (sc->sc_nqueues < ncpu) {
   5669 		/*
    5670 		 * To avoid other devices' interrupts, the affinity of the
    5671 		 * Tx/Rx interrupts starts from CPU#1.
   5672 		 */
   5673 		sc->sc_affinity_offset = 1;
   5674 	} else {
   5675 		/*
    5676 		 * In this case, this device uses all CPUs, so we align the
    5677 		 * affinity cpu_index with the MSI-X vector for readability.
   5678 		 */
   5679 		sc->sc_affinity_offset = 0;
   5680 	}
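         	/*
         	 * E.g. with sc_affinity_offset = 1 and four queues, the Tx/Rx
         	 * vectors below are bound round-robin to CPU#1..CPU#4.
         	 */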
   5681 
   5682 	error = wm_alloc_txrx_queues(sc);
   5683 	if (error) {
   5684 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5685 		    error);
   5686 		return ENOMEM;
   5687 	}
   5688 
   5689 	kcpuset_create(&affinity, false);
   5690 	intr_idx = 0;
   5691 
   5692 	/*
   5693 	 * TX and RX
   5694 	 */
   5695 	txrx_established = 0;
   5696 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5697 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5698 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5699 
   5700 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5701 		    sizeof(intrbuf));
   5702 #ifdef WM_MPSAFE
   5703 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5704 		    PCI_INTR_MPSAFE, true);
   5705 #endif
   5706 		memset(intr_xname, 0, sizeof(intr_xname));
   5707 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5708 		    device_xname(sc->sc_dev), qidx);
   5709 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5710 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5711 		if (vih == NULL) {
   5712 			aprint_error_dev(sc->sc_dev,
   5713 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5714 			    intrstr ? " at " : "",
   5715 			    intrstr ? intrstr : "");
   5716 
   5717 			goto fail;
   5718 		}
   5719 		kcpuset_zero(affinity);
   5720 		/* Round-robin affinity */
   5721 		kcpuset_set(affinity, affinity_to);
   5722 		error = interrupt_distribute(vih, affinity, NULL);
   5723 		if (error == 0) {
   5724 			aprint_normal_dev(sc->sc_dev,
   5725 			    "for TX and RX interrupting at %s affinity to %u\n",
   5726 			    intrstr, affinity_to);
   5727 		} else {
   5728 			aprint_normal_dev(sc->sc_dev,
   5729 			    "for TX and RX interrupting at %s\n", intrstr);
   5730 		}
   5731 		sc->sc_ihs[intr_idx] = vih;
   5732 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5733 			goto fail;
   5734 		txrx_established++;
   5735 		intr_idx++;
   5736 	}
   5737 
   5738 	/* LINK */
   5739 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5740 	    sizeof(intrbuf));
   5741 #ifdef WM_MPSAFE
   5742 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5743 #endif
   5744 	memset(intr_xname, 0, sizeof(intr_xname));
   5745 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5746 	    device_xname(sc->sc_dev));
   5747 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5748 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5749 	if (vih == NULL) {
   5750 		aprint_error_dev(sc->sc_dev,
   5751 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5752 		    intrstr ? " at " : "",
   5753 		    intrstr ? intrstr : "");
   5754 
   5755 		goto fail;
   5756 	}
   5757 	/* Keep default affinity to LINK interrupt */
   5758 	aprint_normal_dev(sc->sc_dev,
   5759 	    "for LINK interrupting at %s\n", intrstr);
   5760 	sc->sc_ihs[intr_idx] = vih;
   5761 	sc->sc_link_intr_idx = intr_idx;
   5762 
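         	/* One MSI-X vector per Tx/Rx queue pair, plus one for link. */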
   5763 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5764 	kcpuset_destroy(affinity);
   5765 	return 0;
   5766 
   5767  fail:
   5768 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5769 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5770 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5771 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5772 	}
   5773 
   5774 	kcpuset_destroy(affinity);
   5775 	return ENOMEM;
   5776 }
   5777 
   5778 static void
   5779 wm_unset_stopping_flags(struct wm_softc *sc)
   5780 {
   5781 	int i;
   5782 
   5783 	KASSERT(WM_CORE_LOCKED(sc));
   5784 
   5785 	/* Must unset stopping flags in ascending order. */
   5786 	for (i = 0; i < sc->sc_nqueues; i++) {
   5787 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5788 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5789 
   5790 		mutex_enter(txq->txq_lock);
   5791 		txq->txq_stopping = false;
   5792 		mutex_exit(txq->txq_lock);
   5793 
   5794 		mutex_enter(rxq->rxq_lock);
   5795 		rxq->rxq_stopping = false;
   5796 		mutex_exit(rxq->rxq_lock);
   5797 	}
   5798 
   5799 	sc->sc_core_stopping = false;
   5800 }
   5801 
   5802 static void
   5803 wm_set_stopping_flags(struct wm_softc *sc)
   5804 {
   5805 	int i;
   5806 
   5807 	KASSERT(WM_CORE_LOCKED(sc));
   5808 
   5809 	sc->sc_core_stopping = true;
   5810 
   5811 	/* Must set stopping flags in ascending order. */
   5812 	for (i = 0; i < sc->sc_nqueues; i++) {
   5813 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5814 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5815 
   5816 		mutex_enter(rxq->rxq_lock);
   5817 		rxq->rxq_stopping = true;
   5818 		mutex_exit(rxq->rxq_lock);
   5819 
   5820 		mutex_enter(txq->txq_lock);
   5821 		txq->txq_stopping = true;
   5822 		mutex_exit(txq->txq_lock);
   5823 	}
   5824 }
   5825 
   5826 /*
   5827  * Write interrupt interval value to ITR or EITR
   5828  */
   5829 static void
   5830 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5831 {
   5832 
   5833 	if (!wmq->wmq_set_itr)
   5834 		return;
   5835 
   5836 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5837 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5838 
   5839 		/*
    5840 		 * The 82575 doesn't have the CNT_INGR field,
    5841 		 * so overwrite the counter field in software.
   5842 		 */
   5843 		if (sc->sc_type == WM_T_82575)
   5844 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5845 		else
   5846 			eitr |= EITR_CNT_INGR;
   5847 
   5848 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5849 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5850 		/*
    5851 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5852 		 * the multiqueue function with MSI-X.
   5853 		 */
   5854 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5855 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5856 	} else {
   5857 		KASSERT(wmq->wmq_id == 0);
   5858 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5859 	}
   5860 
   5861 	wmq->wmq_set_itr = false;
   5862 }
   5863 
   5864 /*
   5865  * TODO
    5866  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5867  * however, it does not fit wm(4), so AIM stays disabled until we find
    5868  * an appropriate ITR calculation.
   5869  */
   5870 /*
    5871  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5872  * write to the register. This function itself does not write ITR/EITR.
   5873  */
   5874 static void
   5875 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5876 {
   5877 #ifdef NOTYET
   5878 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5879 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5880 	uint32_t avg_size = 0;
   5881 	uint32_t new_itr;
   5882 
   5883 	if (rxq->rxq_packets)
   5884 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5885 	if (txq->txq_packets)
   5886 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5887 
   5888 	if (avg_size == 0) {
   5889 		new_itr = 450; /* restore default value */
   5890 		goto out;
   5891 	}
   5892 
   5893 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5894 	avg_size += 24;
   5895 
   5896 	/* Don't starve jumbo frames */
   5897 	avg_size = uimin(avg_size, 3000);
   5898 
   5899 	/* Give a little boost to mid-size frames */
   5900 	if ((avg_size > 300) && (avg_size < 1200))
   5901 		new_itr = avg_size / 3;
   5902 	else
   5903 		new_itr = avg_size / 2;
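         	/* E.g. a 900-byte average gives new_itr = 300; 1500 gives 750. */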
   5904 
   5905 out:
   5906 	/*
    5907 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5908 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5909 	 */
   5910 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5911 		new_itr *= 4;
   5912 
   5913 	if (new_itr != wmq->wmq_itr) {
   5914 		wmq->wmq_itr = new_itr;
   5915 		wmq->wmq_set_itr = true;
   5916 	} else
   5917 		wmq->wmq_set_itr = false;
   5918 
   5919 	rxq->rxq_packets = 0;
   5920 	rxq->rxq_bytes = 0;
   5921 	txq->txq_packets = 0;
   5922 	txq->txq_bytes = 0;
   5923 #endif
   5924 }
   5925 
   5926 static void
   5927 wm_init_sysctls(struct wm_softc *sc)
   5928 {
   5929 	struct sysctllog **log;
   5930 	const struct sysctlnode *rnode, *qnode, *cnode;
   5931 	int i, rv;
   5932 	const char *dvname;
   5933 
   5934 	log = &sc->sc_sysctllog;
   5935 	dvname = device_xname(sc->sc_dev);
   5936 
   5937 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5938 	    0, CTLTYPE_NODE, dvname,
   5939 	    SYSCTL_DESCR("wm information and settings"),
   5940 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5941 	if (rv != 0)
   5942 		goto err;
   5943 
   5944 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5945 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5946 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5947 	if (rv != 0)
   5948 		goto teardown;
   5949 
   5950 	for (i = 0; i < sc->sc_nqueues; i++) {
   5951 		struct wm_queue *wmq = &sc->sc_queue[i];
   5952 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5953 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5954 
   5955 		snprintf(sc->sc_queue[i].sysctlname,
   5956 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5957 
   5958 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5959 		    0, CTLTYPE_NODE,
   5960 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5961 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5962 			break;
   5963 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5964 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5965 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   5966 		    NULL, 0, &txq->txq_free,
   5967 		    0, CTL_CREATE, CTL_EOL) != 0)
   5968 			break;
   5969 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5970 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5971 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   5972 		    NULL, 0, &txq->txq_next,
   5973 		    0, CTL_CREATE, CTL_EOL) != 0)
   5974 			break;
   5975 
   5976 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5977 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5978 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   5979 		    NULL, 0, &rxq->rxq_ptr,
   5980 		    0, CTL_CREATE, CTL_EOL) != 0)
   5981 			break;
   5982 	}
   5983 
   5984 #ifdef WM_DEBUG
   5985 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5986 	    CTLTYPE_INT, "debug_flags",
   5987 	    SYSCTL_DESCR(
   5988 		    "Debug flags:\n"	\
   5989 		    "\t0x01 LINK\n"	\
   5990 		    "\t0x02 TX\n"	\
   5991 		    "\t0x04 RX\n"	\
   5992 		    "\t0x08 GMII\n"	\
   5993 		    "\t0x10 MANAGE\n"	\
   5994 		    "\t0x20 NVM\n"	\
   5995 		    "\t0x40 INIT\n"	\
   5996 		    "\t0x80 LOCK"),
   5997 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   5998 	if (rv != 0)
   5999 		goto teardown;
   6000 #endif
   6001 
   6002 	return;
   6003 
   6004 teardown:
   6005 	sysctl_teardown(log);
   6006 err:
   6007 	sc->sc_sysctllog = NULL;
   6008 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6009 	    __func__, rv);
   6010 }
   6011 
   6012 /*
   6013  * wm_init:		[ifnet interface function]
   6014  *
   6015  *	Initialize the interface.
   6016  */
   6017 static int
   6018 wm_init(struct ifnet *ifp)
   6019 {
   6020 	struct wm_softc *sc = ifp->if_softc;
   6021 	int ret;
   6022 
   6023 	WM_CORE_LOCK(sc);
   6024 	ret = wm_init_locked(ifp);
   6025 	WM_CORE_UNLOCK(sc);
   6026 
   6027 	return ret;
   6028 }
   6029 
   6030 static int
   6031 wm_init_locked(struct ifnet *ifp)
   6032 {
   6033 	struct wm_softc *sc = ifp->if_softc;
   6034 	struct ethercom *ec = &sc->sc_ethercom;
   6035 	int i, j, trynum, error = 0;
   6036 	uint32_t reg, sfp_mask = 0;
   6037 
   6038 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6039 		device_xname(sc->sc_dev), __func__));
   6040 	KASSERT(WM_CORE_LOCKED(sc));
   6041 
   6042 	/*
    6043 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6044 	 * There is a small but measurable benefit to avoiding the adjustment
   6045 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6046 	 * on such platforms.  One possibility is that the DMA itself is
   6047 	 * slightly more efficient if the front of the entire packet (instead
   6048 	 * of the front of the headers) is aligned.
   6049 	 *
   6050 	 * Note we must always set align_tweak to 0 if we are using
   6051 	 * jumbo frames.
   6052 	 */
   6053 #ifdef __NO_STRICT_ALIGNMENT
   6054 	sc->sc_align_tweak = 0;
   6055 #else
   6056 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6057 		sc->sc_align_tweak = 0;
   6058 	else
   6059 		sc->sc_align_tweak = 2;
   6060 #endif /* __NO_STRICT_ALIGNMENT */
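         	/*
         	 * With the 2-byte tweak, the 14-byte Ethernet header ends on
         	 * a 4-byte boundary, so the IP header that follows is aligned.
         	 */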
   6061 
   6062 	/* Cancel any pending I/O. */
   6063 	wm_stop_locked(ifp, false, false);
   6064 
   6065 	/* Update statistics before reset */
   6066 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6067 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6068 
   6069 	/* PCH_SPT hardware workaround */
   6070 	if (sc->sc_type == WM_T_PCH_SPT)
   6071 		wm_flush_desc_rings(sc);
   6072 
   6073 	/* Reset the chip to a known state. */
   6074 	wm_reset(sc);
   6075 
   6076 	/*
   6077 	 * AMT based hardware can now take control from firmware
   6078 	 * Do this after reset.
   6079 	 */
   6080 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6081 		wm_get_hw_control(sc);
   6082 
   6083 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6084 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6085 		wm_legacy_irq_quirk_spt(sc);
   6086 
   6087 	/* Init hardware bits */
   6088 	wm_initialize_hardware_bits(sc);
   6089 
   6090 	/* Reset the PHY. */
   6091 	if (sc->sc_flags & WM_F_HAS_MII)
   6092 		wm_gmii_reset(sc);
   6093 
   6094 	if (sc->sc_type >= WM_T_ICH8) {
   6095 		reg = CSR_READ(sc, WMREG_GCR);
   6096 		/*
   6097 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6098 		 * default after reset.
   6099 		 */
   6100 		if (sc->sc_type == WM_T_ICH8)
   6101 			reg |= GCR_NO_SNOOP_ALL;
   6102 		else
   6103 			reg &= ~GCR_NO_SNOOP_ALL;
   6104 		CSR_WRITE(sc, WMREG_GCR, reg);
   6105 	}
   6106 
   6107 	if ((sc->sc_type >= WM_T_ICH8)
   6108 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6109 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6110 
   6111 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6112 		reg |= CTRL_EXT_RO_DIS;
   6113 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6114 	}
   6115 
   6116 	/* Calculate (E)ITR value */
   6117 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6118 		/*
   6119 		 * For NEWQUEUE's EITR (except for 82575).
    6120 		 * The 82575's EITR should be set to the same throttling
    6121 		 * value as old controllers' ITR because the interrupt/sec
    6122 		 * calculation is the same: 1,000,000,000 / (N * 256).
    6123 		 *
    6124 		 * The 82574's EITR should be set to the same value as ITR.
   6125 		 *
   6126 		 * For N interrupts/sec, set this value to:
    6127 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   6128 		 */
   6129 		sc->sc_itr_init = 450;
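         		/* I.e. about 1,000,000 / 450 = ~2222 interrupts/sec. */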
   6130 	} else if (sc->sc_type >= WM_T_82543) {
   6131 		/*
   6132 		 * Set up the interrupt throttling register (units of 256ns)
   6133 		 * Note that a footnote in Intel's documentation says this
   6134 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6135 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6136 		 * that that is also true for the 1024ns units of the other
   6137 		 * interrupt-related timer registers -- so, really, we ought
   6138 		 * to divide this value by 4 when the link speed is low.
   6139 		 *
   6140 		 * XXX implement this division at link speed change!
   6141 		 */
   6142 
   6143 		/*
   6144 		 * For N interrupts/sec, set this value to:
   6145 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6146 		 * absolute and packet timer values to this value
   6147 		 * divided by 4 to get "simple timer" behavior.
   6148 		 */
   6149 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6150 	}
   6151 
   6152 	error = wm_init_txrx_queues(sc);
   6153 	if (error)
   6154 		goto out;
   6155 
   6156 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6157 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6158 	    (sc->sc_type >= WM_T_82575))
   6159 		wm_serdes_power_up_link_82575(sc);
   6160 
   6161 	/* Clear out the VLAN table -- we don't use it (yet). */
   6162 	CSR_WRITE(sc, WMREG_VET, 0);
   6163 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6164 		trynum = 10; /* Due to hw errata */
   6165 	else
   6166 		trynum = 1;
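	/*
	 * The VFTA is a bit vector; assuming the usual 128 32-bit
	 * entries, clearing it covers all 4096 possible VLAN IDs.
	 */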
   6167 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6168 		for (j = 0; j < trynum; j++)
   6169 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6170 
   6171 	/*
   6172 	 * Set up flow-control parameters.
   6173 	 *
   6174 	 * XXX Values could probably stand some tuning.
   6175 	 */
   6176 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6177 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6178 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6179 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6180 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6181 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6182 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6183 	}
   6184 
   6185 	sc->sc_fcrtl = FCRTL_DFLT;
   6186 	if (sc->sc_type < WM_T_82543) {
   6187 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6188 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6189 	} else {
   6190 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6191 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6192 	}
   6193 
   6194 	if (sc->sc_type == WM_T_80003)
   6195 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6196 	else
   6197 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6198 
    6199 	/* wm_set_vlan() writes the control register. */
   6200 	wm_set_vlan(sc);
   6201 
   6202 	if (sc->sc_flags & WM_F_HAS_MII) {
   6203 		uint16_t kmreg;
   6204 
   6205 		switch (sc->sc_type) {
   6206 		case WM_T_80003:
   6207 		case WM_T_ICH8:
   6208 		case WM_T_ICH9:
   6209 		case WM_T_ICH10:
   6210 		case WM_T_PCH:
   6211 		case WM_T_PCH2:
   6212 		case WM_T_PCH_LPT:
   6213 		case WM_T_PCH_SPT:
   6214 		case WM_T_PCH_CNP:
   6215 			/*
   6216 			 * Set the mac to wait the maximum time between each
   6217 			 * iteration and increase the max iterations when
   6218 			 * polling the phy; this fixes erroneous timeouts at
   6219 			 * 10Mbps.
   6220 			 */
   6221 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6222 			    0xFFFF);
   6223 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6224 			    &kmreg);
   6225 			kmreg |= 0x3F;
   6226 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6227 			    kmreg);
   6228 			break;
   6229 		default:
   6230 			break;
   6231 		}
   6232 
   6233 		if (sc->sc_type == WM_T_80003) {
   6234 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6235 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6236 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6237 
    6238 			/* Bypass the RX and TX FIFOs. */
   6239 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6240 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6241 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6242 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6243 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6244 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6245 		}
   6246 	}
   6247 #if 0
   6248 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6249 #endif
   6250 
   6251 	/* Set up checksum offload parameters. */
   6252 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6253 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6254 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6255 		reg |= RXCSUM_IPOFL;
   6256 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6257 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6258 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6259 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6260 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6261 
   6262 	/* Set registers about MSI-X */
   6263 	if (wm_is_using_msix(sc)) {
   6264 		uint32_t ivar, qintr_idx;
   6265 		struct wm_queue *wmq;
   6266 		unsigned int qid;
   6267 
   6268 		if (sc->sc_type == WM_T_82575) {
   6269 			/* Interrupt control */
   6270 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6271 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6272 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6273 
   6274 			/* TX and RX */
   6275 			for (i = 0; i < sc->sc_nqueues; i++) {
   6276 				wmq = &sc->sc_queue[i];
   6277 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6278 				    EITR_TX_QUEUE(wmq->wmq_id)
   6279 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6280 			}
   6281 			/* Link status */
   6282 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6283 			    EITR_OTHER);
   6284 		} else if (sc->sc_type == WM_T_82574) {
   6285 			/* Interrupt control */
   6286 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6287 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6288 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6289 
   6290 			/*
   6291 			 * Workaround issue with spurious interrupts
   6292 			 * in MSI-X mode.
    6293 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    6294 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   6295 			 */
   6296 			reg = CSR_READ(sc, WMREG_RFCTL);
   6297 			reg |= WMREG_RFCTL_ACKDIS;
   6298 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6299 
   6300 			ivar = 0;
   6301 			/* TX and RX */
   6302 			for (i = 0; i < sc->sc_nqueues; i++) {
   6303 				wmq = &sc->sc_queue[i];
   6304 				qid = wmq->wmq_id;
   6305 				qintr_idx = wmq->wmq_intr_idx;
   6306 
   6307 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6308 				    IVAR_TX_MASK_Q_82574(qid));
   6309 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6310 				    IVAR_RX_MASK_Q_82574(qid));
   6311 			}
   6312 			/* Link status */
   6313 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6314 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6315 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6316 		} else {
   6317 			/* Interrupt control */
   6318 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6319 			    | GPIE_EIAME | GPIE_PBA);
   6320 
   6321 			switch (sc->sc_type) {
   6322 			case WM_T_82580:
   6323 			case WM_T_I350:
   6324 			case WM_T_I354:
   6325 			case WM_T_I210:
   6326 			case WM_T_I211:
   6327 				/* TX and RX */
   6328 				for (i = 0; i < sc->sc_nqueues; i++) {
   6329 					wmq = &sc->sc_queue[i];
   6330 					qid = wmq->wmq_id;
   6331 					qintr_idx = wmq->wmq_intr_idx;
   6332 
   6333 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6334 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6335 					ivar |= __SHIFTIN((qintr_idx
   6336 						| IVAR_VALID),
   6337 					    IVAR_TX_MASK_Q(qid));
   6338 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6339 					ivar |= __SHIFTIN((qintr_idx
   6340 						| IVAR_VALID),
   6341 					    IVAR_RX_MASK_Q(qid));
   6342 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6343 				}
   6344 				break;
   6345 			case WM_T_82576:
   6346 				/* TX and RX */
   6347 				for (i = 0; i < sc->sc_nqueues; i++) {
   6348 					wmq = &sc->sc_queue[i];
   6349 					qid = wmq->wmq_id;
   6350 					qintr_idx = wmq->wmq_intr_idx;
   6351 
   6352 					ivar = CSR_READ(sc,
   6353 					    WMREG_IVAR_Q_82576(qid));
   6354 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6355 					ivar |= __SHIFTIN((qintr_idx
   6356 						| IVAR_VALID),
   6357 					    IVAR_TX_MASK_Q_82576(qid));
   6358 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6359 					ivar |= __SHIFTIN((qintr_idx
   6360 						| IVAR_VALID),
   6361 					    IVAR_RX_MASK_Q_82576(qid));
   6362 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6363 					    ivar);
   6364 				}
   6365 				break;
   6366 			default:
   6367 				break;
   6368 			}
   6369 
   6370 			/* Link status */
   6371 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6372 			    IVAR_MISC_OTHER);
   6373 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6374 		}
   6375 
   6376 		if (wm_is_using_multiqueue(sc)) {
   6377 			wm_init_rss(sc);
   6378 
   6379 			/*
    6380 			 * NOTE: Receive full-packet checksum offload is
    6381 			 * mutually exclusive with multiqueue. However, this
    6382 			 * is not the same as the TCP/IP checksum offloads,
    6383 			 * which still work.
    6384 			 */
   6385 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6386 			reg |= RXCSUM_PCSD;
   6387 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6388 		}
   6389 	}
   6390 
   6391 	/* Set up the interrupt registers. */
   6392 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6393 
   6394 	/* Enable SFP module insertion interrupt if it's required */
   6395 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6396 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6397 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6398 		sfp_mask = ICR_GPI(0);
   6399 	}
   6400 
   6401 	if (wm_is_using_msix(sc)) {
   6402 		uint32_t mask;
   6403 		struct wm_queue *wmq;
   6404 
   6405 		switch (sc->sc_type) {
   6406 		case WM_T_82574:
   6407 			mask = 0;
   6408 			for (i = 0; i < sc->sc_nqueues; i++) {
   6409 				wmq = &sc->sc_queue[i];
   6410 				mask |= ICR_TXQ(wmq->wmq_id);
   6411 				mask |= ICR_RXQ(wmq->wmq_id);
   6412 			}
   6413 			mask |= ICR_OTHER;
   6414 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6415 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6416 			break;
   6417 		default:
   6418 			if (sc->sc_type == WM_T_82575) {
   6419 				mask = 0;
   6420 				for (i = 0; i < sc->sc_nqueues; i++) {
   6421 					wmq = &sc->sc_queue[i];
   6422 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6423 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6424 				}
   6425 				mask |= EITR_OTHER;
   6426 			} else {
   6427 				mask = 0;
   6428 				for (i = 0; i < sc->sc_nqueues; i++) {
   6429 					wmq = &sc->sc_queue[i];
   6430 					mask |= 1 << wmq->wmq_intr_idx;
   6431 				}
   6432 				mask |= 1 << sc->sc_link_intr_idx;
   6433 			}
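			/*
			 * Roughly: EIAC marks the vectors that auto-clear
			 * when their interrupt fires, EIAM the ones that
			 * are auto-masked, and EIMS enables them.
			 */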
   6434 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6435 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6436 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6437 
   6438 			/* For other interrupts */
   6439 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6440 			break;
   6441 		}
   6442 	} else {
   6443 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6444 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6445 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6446 	}
   6447 
   6448 	/* Set up the inter-packet gap. */
   6449 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6450 
   6451 	if (sc->sc_type >= WM_T_82543) {
   6452 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6453 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6454 			wm_itrs_writereg(sc, wmq);
   6455 		}
   6456 		/*
    6457 		 * Link interrupts occur much less frequently than TX and
    6458 		 * RX interrupts, so we don't tune the
    6459 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6460 		 * if_igb does.
   6461 		 */
   6462 	}
   6463 
   6464 	/* Set the VLAN ethernetype. */
   6465 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6466 
   6467 	/*
   6468 	 * Set up the transmit control register; we start out with
    6469 	 * a collision distance suitable for FDX, but update it when
   6470 	 * we resolve the media type.
   6471 	 */
   6472 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6473 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6474 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6475 	if (sc->sc_type >= WM_T_82571)
   6476 		sc->sc_tctl |= TCTL_MULR;
   6477 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6478 
   6479 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6480 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6481 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6482 	}
   6483 
   6484 	if (sc->sc_type == WM_T_80003) {
   6485 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6486 		reg &= ~TCTL_EXT_GCEX_MASK;
   6487 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6488 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6489 	}
   6490 
   6491 	/* Set the media. */
   6492 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6493 		goto out;
   6494 
   6495 	/* Configure for OS presence */
   6496 	wm_init_manageability(sc);
   6497 
   6498 	/*
   6499 	 * Set up the receive control register; we actually program the
   6500 	 * register when we set the receive filter. Use multicast address
   6501 	 * offset type 0.
   6502 	 *
   6503 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6504 	 * don't enable that feature.
   6505 	 */
   6506 	sc->sc_mchash_type = 0;
   6507 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6508 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6509 
    6510 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6511 	if (sc->sc_type == WM_T_82574)
   6512 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6513 
   6514 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6515 		sc->sc_rctl |= RCTL_SECRC;
   6516 
   6517 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6518 	    && (ifp->if_mtu > ETHERMTU)) {
   6519 		sc->sc_rctl |= RCTL_LPE;
   6520 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6521 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6522 	}
   6523 
   6524 	if (MCLBYTES == 2048)
   6525 		sc->sc_rctl |= RCTL_2k;
   6526 	else {
   6527 		if (sc->sc_type >= WM_T_82543) {
   6528 			switch (MCLBYTES) {
   6529 			case 4096:
   6530 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6531 				break;
   6532 			case 8192:
   6533 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6534 				break;
   6535 			case 16384:
   6536 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6537 				break;
   6538 			default:
   6539 				panic("wm_init: MCLBYTES %d unsupported",
   6540 				    MCLBYTES);
   6541 				break;
   6542 			}
   6543 		} else
   6544 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6545 	}
   6546 
   6547 	/* Enable ECC */
   6548 	switch (sc->sc_type) {
   6549 	case WM_T_82571:
   6550 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6551 		reg |= PBA_ECC_CORR_EN;
   6552 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6553 		break;
   6554 	case WM_T_PCH_LPT:
   6555 	case WM_T_PCH_SPT:
   6556 	case WM_T_PCH_CNP:
   6557 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6558 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6559 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6560 
   6561 		sc->sc_ctrl |= CTRL_MEHE;
   6562 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6563 		break;
   6564 	default:
   6565 		break;
   6566 	}
   6567 
   6568 	/*
   6569 	 * Set the receive filter.
   6570 	 *
   6571 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6572 	 * the setting of RCTL.EN in wm_set_filter()
   6573 	 */
   6574 	wm_set_filter(sc);
   6575 
    6576 	/* On the 82575 and later, set RDT only after RX is enabled. */
   6577 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6578 		int qidx;
   6579 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6580 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6581 			for (i = 0; i < WM_NRXDESC; i++) {
   6582 				mutex_enter(rxq->rxq_lock);
   6583 				wm_init_rxdesc(rxq, i);
   6584 				mutex_exit(rxq->rxq_lock);
   6585 
   6586 			}
   6587 		}
   6588 	}
   6589 
   6590 	wm_unset_stopping_flags(sc);
   6591 
   6592 	/* Start the one second link check clock. */
   6593 	callout_schedule(&sc->sc_tick_ch, hz);
   6594 
   6595 	/* ...all done! */
   6596 	ifp->if_flags |= IFF_RUNNING;
   6597 
   6598  out:
   6599 	/* Save last flags for the callback */
   6600 	sc->sc_if_flags = ifp->if_flags;
   6601 	sc->sc_ec_capenable = ec->ec_capenable;
   6602 	if (error)
   6603 		log(LOG_ERR, "%s: interface not running\n",
   6604 		    device_xname(sc->sc_dev));
   6605 	return error;
   6606 }
   6607 
   6608 /*
   6609  * wm_stop:		[ifnet interface function]
   6610  *
   6611  *	Stop transmission on the interface.
   6612  */
   6613 static void
   6614 wm_stop(struct ifnet *ifp, int disable)
   6615 {
   6616 	struct wm_softc *sc = ifp->if_softc;
   6617 
   6618 	ASSERT_SLEEPABLE();
   6619 
   6620 	WM_CORE_LOCK(sc);
   6621 	wm_stop_locked(ifp, disable ? true : false, true);
   6622 	WM_CORE_UNLOCK(sc);
   6623 
   6624 	/*
    6625 	 * After wm_set_stopping_flags(), wm_handle_queue_work() is
    6626 	 * guaranteed not to call workqueue_enqueue(). However,
    6627 	 * workqueue_wait() cannot be called in wm_stop_locked()
    6628 	 * because it can sleep, so call workqueue_wait() here.
   6630 	 */
   6631 	for (int i = 0; i < sc->sc_nqueues; i++)
   6632 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6633 }
   6634 
   6635 static void
   6636 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6637 {
   6638 	struct wm_softc *sc = ifp->if_softc;
   6639 	struct wm_txsoft *txs;
   6640 	int i, qidx;
   6641 
   6642 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6643 		device_xname(sc->sc_dev), __func__));
   6644 	KASSERT(WM_CORE_LOCKED(sc));
   6645 
   6646 	wm_set_stopping_flags(sc);
   6647 
   6648 	if (sc->sc_flags & WM_F_HAS_MII) {
   6649 		/* Down the MII. */
   6650 		mii_down(&sc->sc_mii);
   6651 	} else {
   6652 #if 0
   6653 		/* Should we clear PHY's status properly? */
   6654 		wm_reset(sc);
   6655 #endif
   6656 	}
   6657 
   6658 	/* Stop the transmit and receive processes. */
   6659 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6660 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6661 	sc->sc_rctl &= ~RCTL_EN;
   6662 
   6663 	/*
   6664 	 * Clear the interrupt mask to ensure the device cannot assert its
   6665 	 * interrupt line.
   6666 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6667 	 * service any currently pending or shared interrupt.
   6668 	 */
   6669 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6670 	sc->sc_icr = 0;
   6671 	if (wm_is_using_msix(sc)) {
   6672 		if (sc->sc_type != WM_T_82574) {
   6673 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6674 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6675 		} else
   6676 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6677 	}
   6678 
   6679 	/*
   6680 	 * Stop callouts after interrupts are disabled; if we have
   6681 	 * to wait for them, we will be releasing the CORE_LOCK
   6682 	 * briefly, which will unblock interrupts on the current CPU.
   6683 	 */
   6684 
   6685 	/* Stop the one second clock. */
   6686 	if (wait)
   6687 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6688 	else
   6689 		callout_stop(&sc->sc_tick_ch);
   6690 
   6691 	/* Stop the 82547 Tx FIFO stall check timer. */
   6692 	if (sc->sc_type == WM_T_82547) {
   6693 		if (wait)
   6694 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6695 		else
   6696 			callout_stop(&sc->sc_txfifo_ch);
   6697 	}
   6698 
   6699 	/* Release any queued transmit buffers. */
   6700 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6701 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6702 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6703 		struct mbuf *m;
   6704 
   6705 		mutex_enter(txq->txq_lock);
   6706 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6707 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6708 			txs = &txq->txq_soft[i];
   6709 			if (txs->txs_mbuf != NULL) {
   6710 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6711 				m_freem(txs->txs_mbuf);
   6712 				txs->txs_mbuf = NULL;
   6713 			}
   6714 		}
   6715 		/* Drain txq_interq */
   6716 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6717 			m_freem(m);
   6718 		mutex_exit(txq->txq_lock);
   6719 	}
   6720 
   6721 	/* Mark the interface as down and cancel the watchdog timer. */
   6722 	ifp->if_flags &= ~IFF_RUNNING;
   6723 
   6724 	if (disable) {
   6725 		for (i = 0; i < sc->sc_nqueues; i++) {
   6726 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6727 			mutex_enter(rxq->rxq_lock);
   6728 			wm_rxdrain(rxq);
   6729 			mutex_exit(rxq->rxq_lock);
   6730 		}
   6731 	}
   6732 
   6733 #if 0 /* notyet */
   6734 	if (sc->sc_type >= WM_T_82544)
   6735 		CSR_WRITE(sc, WMREG_WUC, 0);
   6736 #endif
   6737 }
   6738 
   6739 static void
   6740 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6741 {
   6742 	struct mbuf *m;
   6743 	int i;
   6744 
   6745 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6746 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6747 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6748 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6749 		    m->m_data, m->m_len, m->m_flags);
   6750 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6751 	    i, i == 1 ? "" : "s");
   6752 }
   6753 
   6754 /*
   6755  * wm_82547_txfifo_stall:
   6756  *
   6757  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6758  *	reset the FIFO pointers, and restart packet transmission.
   6759  */
   6760 static void
   6761 wm_82547_txfifo_stall(void *arg)
   6762 {
   6763 	struct wm_softc *sc = arg;
   6764 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6765 
   6766 	mutex_enter(txq->txq_lock);
   6767 
   6768 	if (txq->txq_stopping)
   6769 		goto out;
   6770 
   6771 	if (txq->txq_fifo_stall) {
   6772 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6773 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6774 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6775 			/*
   6776 			 * Packets have drained.  Stop transmitter, reset
   6777 			 * FIFO pointers, restart transmitter, and kick
   6778 			 * the packet queue.
   6779 			 */
   6780 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6781 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6782 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6783 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6784 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6785 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6786 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6787 			CSR_WRITE_FLUSH(sc);
   6788 
   6789 			txq->txq_fifo_head = 0;
   6790 			txq->txq_fifo_stall = 0;
   6791 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6792 		} else {
   6793 			/*
   6794 			 * Still waiting for packets to drain; try again in
   6795 			 * another tick.
   6796 			 */
   6797 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6798 		}
   6799 	}
   6800 
   6801 out:
   6802 	mutex_exit(txq->txq_lock);
   6803 }
   6804 
   6805 /*
   6806  * wm_82547_txfifo_bugchk:
   6807  *
   6808  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6809  *	prevent enqueueing a packet that would wrap around the end
    6810  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6811  *
   6812  *	We do this by checking the amount of space before the end
   6813  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6814  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6815  *	the internal FIFO pointers to the beginning, and restart
   6816  *	transmission on the interface.
   6817  */
   6818 #define	WM_FIFO_HDR		0x10
   6819 #define	WM_82547_PAD_LEN	0x3e0
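/*
 * FIFO accounting example: a 1514-byte frame is charged
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space.
 */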
   6820 static int
   6821 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6822 {
   6823 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6824 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6825 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6826 
   6827 	/* Just return if already stalled. */
   6828 	if (txq->txq_fifo_stall)
   6829 		return 1;
   6830 
   6831 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6832 		/* Stall only occurs in half-duplex mode. */
   6833 		goto send_packet;
   6834 	}
   6835 
   6836 	if (len >= WM_82547_PAD_LEN + space) {
   6837 		txq->txq_fifo_stall = 1;
   6838 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6839 		return 1;
   6840 	}
   6841 
   6842  send_packet:
   6843 	txq->txq_fifo_head += len;
   6844 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6845 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6846 
   6847 	return 0;
   6848 }
   6849 
   6850 static int
   6851 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6852 {
   6853 	int error;
   6854 
   6855 	/*
   6856 	 * Allocate the control data structures, and create and load the
   6857 	 * DMA map for it.
   6858 	 *
   6859 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6860 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6861 	 * both sets within the same 4G segment.
   6862 	 */
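	/*
	 * The 4G constraint is enforced by the 0x100000000ULL boundary
	 * argument passed to bus_dmamem_alloc() below.
	 */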
   6863 	if (sc->sc_type < WM_T_82544)
   6864 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6865 	else
   6866 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6867 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6868 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6869 	else
   6870 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6871 
   6872 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6873 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6874 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6875 		aprint_error_dev(sc->sc_dev,
   6876 		    "unable to allocate TX control data, error = %d\n",
   6877 		    error);
   6878 		goto fail_0;
   6879 	}
   6880 
   6881 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6882 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6883 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6884 		aprint_error_dev(sc->sc_dev,
   6885 		    "unable to map TX control data, error = %d\n", error);
   6886 		goto fail_1;
   6887 	}
   6888 
   6889 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6890 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6891 		aprint_error_dev(sc->sc_dev,
   6892 		    "unable to create TX control data DMA map, error = %d\n",
   6893 		    error);
   6894 		goto fail_2;
   6895 	}
   6896 
   6897 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6898 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6899 		aprint_error_dev(sc->sc_dev,
   6900 		    "unable to load TX control data DMA map, error = %d\n",
   6901 		    error);
   6902 		goto fail_3;
   6903 	}
   6904 
   6905 	return 0;
   6906 
   6907  fail_3:
   6908 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6909  fail_2:
   6910 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6911 	    WM_TXDESCS_SIZE(txq));
   6912  fail_1:
   6913 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6914  fail_0:
   6915 	return error;
   6916 }
   6917 
   6918 static void
   6919 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6920 {
   6921 
   6922 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6923 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6924 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6925 	    WM_TXDESCS_SIZE(txq));
   6926 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6927 }
   6928 
   6929 static int
   6930 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6931 {
   6932 	int error;
   6933 	size_t rxq_descs_size;
   6934 
   6935 	/*
   6936 	 * Allocate the control data structures, and create and load the
   6937 	 * DMA map for it.
   6938 	 *
   6939 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6940 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6941 	 * both sets within the same 4G segment.
   6942 	 */
   6943 	rxq->rxq_ndesc = WM_NRXDESC;
   6944 	if (sc->sc_type == WM_T_82574)
   6945 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6946 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6947 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6948 	else
   6949 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6950 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6951 
   6952 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6953 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6954 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6955 		aprint_error_dev(sc->sc_dev,
   6956 		    "unable to allocate RX control data, error = %d\n",
   6957 		    error);
   6958 		goto fail_0;
   6959 	}
   6960 
   6961 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6962 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6963 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6964 		aprint_error_dev(sc->sc_dev,
   6965 		    "unable to map RX control data, error = %d\n", error);
   6966 		goto fail_1;
   6967 	}
   6968 
   6969 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6970 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6971 		aprint_error_dev(sc->sc_dev,
   6972 		    "unable to create RX control data DMA map, error = %d\n",
   6973 		    error);
   6974 		goto fail_2;
   6975 	}
   6976 
   6977 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6978 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6979 		aprint_error_dev(sc->sc_dev,
   6980 		    "unable to load RX control data DMA map, error = %d\n",
   6981 		    error);
   6982 		goto fail_3;
   6983 	}
   6984 
   6985 	return 0;
   6986 
   6987  fail_3:
   6988 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6989  fail_2:
   6990 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6991 	    rxq_descs_size);
   6992  fail_1:
   6993 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6994  fail_0:
   6995 	return error;
   6996 }
   6997 
   6998 static void
   6999 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7000 {
   7001 
   7002 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7003 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7004 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7005 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7006 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7007 }
   7008 
   7009 
   7010 static int
   7011 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7012 {
   7013 	int i, error;
   7014 
   7015 	/* Create the transmit buffer DMA maps. */
   7016 	WM_TXQUEUELEN(txq) =
   7017 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7018 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7019 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7020 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7021 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7022 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7023 			aprint_error_dev(sc->sc_dev,
   7024 			    "unable to create Tx DMA map %d, error = %d\n",
   7025 			    i, error);
   7026 			goto fail;
   7027 		}
   7028 	}
   7029 
   7030 	return 0;
   7031 
   7032  fail:
   7033 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7034 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7035 			bus_dmamap_destroy(sc->sc_dmat,
   7036 			    txq->txq_soft[i].txs_dmamap);
   7037 	}
   7038 	return error;
   7039 }
   7040 
   7041 static void
   7042 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7043 {
   7044 	int i;
   7045 
   7046 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7047 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7048 			bus_dmamap_destroy(sc->sc_dmat,
   7049 			    txq->txq_soft[i].txs_dmamap);
   7050 	}
   7051 }
   7052 
   7053 static int
   7054 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7055 {
   7056 	int i, error;
   7057 
   7058 	/* Create the receive buffer DMA maps. */
   7059 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7060 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7061 			    MCLBYTES, 0, 0,
   7062 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7063 			aprint_error_dev(sc->sc_dev,
    7064 			    "unable to create Rx DMA map %d, error = %d\n",
   7065 			    i, error);
   7066 			goto fail;
   7067 		}
   7068 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7069 	}
   7070 
   7071 	return 0;
   7072 
   7073  fail:
   7074 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7075 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7076 			bus_dmamap_destroy(sc->sc_dmat,
   7077 			    rxq->rxq_soft[i].rxs_dmamap);
   7078 	}
   7079 	return error;
   7080 }
   7081 
   7082 static void
   7083 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7084 {
   7085 	int i;
   7086 
   7087 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7088 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7089 			bus_dmamap_destroy(sc->sc_dmat,
   7090 			    rxq->rxq_soft[i].rxs_dmamap);
   7091 	}
   7092 }
   7093 
   7094 /*
    7095  * wm_alloc_txrx_queues:
    7096  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   7097  */
   7098 static int
   7099 wm_alloc_txrx_queues(struct wm_softc *sc)
   7100 {
   7101 	int i, error, tx_done, rx_done;
   7102 
   7103 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7104 	    KM_SLEEP);
   7105 	if (sc->sc_queue == NULL) {
    7106 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7107 		error = ENOMEM;
   7108 		goto fail_0;
   7109 	}
   7110 
   7111 	/* For transmission */
   7112 	error = 0;
   7113 	tx_done = 0;
   7114 	for (i = 0; i < sc->sc_nqueues; i++) {
   7115 #ifdef WM_EVENT_COUNTERS
   7116 		int j;
   7117 		const char *xname;
   7118 #endif
   7119 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7120 		txq->txq_sc = sc;
   7121 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7122 
   7123 		error = wm_alloc_tx_descs(sc, txq);
   7124 		if (error)
   7125 			break;
   7126 		error = wm_alloc_tx_buffer(sc, txq);
   7127 		if (error) {
   7128 			wm_free_tx_descs(sc, txq);
   7129 			break;
   7130 		}
   7131 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7132 		if (txq->txq_interq == NULL) {
   7133 			wm_free_tx_descs(sc, txq);
   7134 			wm_free_tx_buffer(sc, txq);
   7135 			error = ENOMEM;
   7136 			break;
   7137 		}
   7138 
   7139 #ifdef WM_EVENT_COUNTERS
   7140 		xname = device_xname(sc->sc_dev);
   7141 
   7142 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7143 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7144 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7145 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7146 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7147 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7148 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7149 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7150 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7151 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7152 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7153 
   7154 		for (j = 0; j < WM_NTXSEGS; j++) {
    7155 			snprintf(txq->txq_txseg_evcnt_names[j],
    7156 			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
    7157 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
    7158 			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
   7159 		}
   7160 
   7161 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7162 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7163 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7164 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7165 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7166 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7167 #endif /* WM_EVENT_COUNTERS */
   7168 
   7169 		tx_done++;
   7170 	}
   7171 	if (error)
   7172 		goto fail_1;
   7173 
   7174 	/* For receive */
   7175 	error = 0;
   7176 	rx_done = 0;
   7177 	for (i = 0; i < sc->sc_nqueues; i++) {
   7178 #ifdef WM_EVENT_COUNTERS
   7179 		const char *xname;
   7180 #endif
   7181 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7182 		rxq->rxq_sc = sc;
   7183 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7184 
   7185 		error = wm_alloc_rx_descs(sc, rxq);
   7186 		if (error)
   7187 			break;
   7188 
   7189 		error = wm_alloc_rx_buffer(sc, rxq);
   7190 		if (error) {
   7191 			wm_free_rx_descs(sc, rxq);
   7192 			break;
   7193 		}
   7194 
   7195 #ifdef WM_EVENT_COUNTERS
   7196 		xname = device_xname(sc->sc_dev);
   7197 
   7198 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7199 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7200 
   7201 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7202 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7203 #endif /* WM_EVENT_COUNTERS */
   7204 
   7205 		rx_done++;
   7206 	}
   7207 	if (error)
   7208 		goto fail_2;
   7209 
   7210 	return 0;
   7211 
   7212  fail_2:
   7213 	for (i = 0; i < rx_done; i++) {
   7214 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7215 		wm_free_rx_buffer(sc, rxq);
   7216 		wm_free_rx_descs(sc, rxq);
   7217 		if (rxq->rxq_lock)
   7218 			mutex_obj_free(rxq->rxq_lock);
   7219 	}
   7220  fail_1:
   7221 	for (i = 0; i < tx_done; i++) {
   7222 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7223 		pcq_destroy(txq->txq_interq);
   7224 		wm_free_tx_buffer(sc, txq);
   7225 		wm_free_tx_descs(sc, txq);
   7226 		if (txq->txq_lock)
   7227 			mutex_obj_free(txq->txq_lock);
   7228 	}
   7229 
   7230 	kmem_free(sc->sc_queue,
   7231 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7232  fail_0:
   7233 	return error;
   7234 }
   7235 
   7236 /*
    7237  * wm_free_txrx_queues:
    7238  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7239  */
   7240 static void
   7241 wm_free_txrx_queues(struct wm_softc *sc)
   7242 {
   7243 	int i;
   7244 
   7245 	for (i = 0; i < sc->sc_nqueues; i++) {
   7246 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7247 
   7248 #ifdef WM_EVENT_COUNTERS
   7249 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7250 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7251 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7252 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7253 #endif /* WM_EVENT_COUNTERS */
   7254 
   7255 		wm_free_rx_buffer(sc, rxq);
   7256 		wm_free_rx_descs(sc, rxq);
   7257 		if (rxq->rxq_lock)
   7258 			mutex_obj_free(rxq->rxq_lock);
   7259 	}
   7260 
   7261 	for (i = 0; i < sc->sc_nqueues; i++) {
   7262 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7263 		struct mbuf *m;
   7264 #ifdef WM_EVENT_COUNTERS
   7265 		int j;
   7266 
   7267 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7268 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7269 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7270 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7271 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7272 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7273 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7274 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7275 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7276 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7277 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7278 
   7279 		for (j = 0; j < WM_NTXSEGS; j++)
   7280 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7281 
   7282 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7283 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7284 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7285 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7286 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7287 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7288 #endif /* WM_EVENT_COUNTERS */
   7289 
   7290 		/* Drain txq_interq */
   7291 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7292 			m_freem(m);
   7293 		pcq_destroy(txq->txq_interq);
   7294 
   7295 		wm_free_tx_buffer(sc, txq);
   7296 		wm_free_tx_descs(sc, txq);
   7297 		if (txq->txq_lock)
   7298 			mutex_obj_free(txq->txq_lock);
   7299 	}
   7300 
   7301 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7302 }
   7303 
   7304 static void
   7305 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7306 {
   7307 
   7308 	KASSERT(mutex_owned(txq->txq_lock));
   7309 
   7310 	/* Initialize the transmit descriptor ring. */
   7311 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7312 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7313 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7314 	txq->txq_free = WM_NTXDESC(txq);
   7315 	txq->txq_next = 0;
   7316 }
   7317 
   7318 static void
   7319 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7320     struct wm_txqueue *txq)
   7321 {
   7322 
   7323 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7324 		device_xname(sc->sc_dev), __func__));
   7325 	KASSERT(mutex_owned(txq->txq_lock));
   7326 
   7327 	if (sc->sc_type < WM_T_82543) {
   7328 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7329 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7330 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7331 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7332 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7333 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7334 	} else {
   7335 		int qid = wmq->wmq_id;
   7336 
   7337 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7338 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7339 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7340 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7341 
   7342 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7343 			/*
   7344 			 * Don't write TDT before TCTL.EN is set.
    7345 			 * See the documentation.
   7346 			 */
   7347 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7348 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7349 			    | TXDCTL_WTHRESH(0));
   7350 		else {
   7351 			/* XXX should update with AIM? */
   7352 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7353 			if (sc->sc_type >= WM_T_82540) {
   7354 				/* Should be the same */
   7355 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7356 			}
   7357 
   7358 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7359 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7360 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7361 		}
   7362 	}
   7363 }
   7364 
   7365 static void
   7366 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7367 {
   7368 	int i;
   7369 
   7370 	KASSERT(mutex_owned(txq->txq_lock));
   7371 
   7372 	/* Initialize the transmit job descriptors. */
   7373 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7374 		txq->txq_soft[i].txs_mbuf = NULL;
   7375 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7376 	txq->txq_snext = 0;
   7377 	txq->txq_sdirty = 0;
   7378 }
   7379 
   7380 static void
   7381 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7382     struct wm_txqueue *txq)
   7383 {
   7384 
   7385 	KASSERT(mutex_owned(txq->txq_lock));
   7386 
   7387 	/*
   7388 	 * Set up some register offsets that are different between
   7389 	 * the i82542 and the i82543 and later chips.
   7390 	 */
   7391 	if (sc->sc_type < WM_T_82543)
   7392 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7393 	else
   7394 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7395 
   7396 	wm_init_tx_descs(sc, txq);
   7397 	wm_init_tx_regs(sc, wmq, txq);
   7398 	wm_init_tx_buffer(sc, txq);
   7399 
   7400 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7401 	txq->txq_sending = false;
   7402 }
   7403 
   7404 static void
   7405 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7406     struct wm_rxqueue *rxq)
   7407 {
   7408 
   7409 	KASSERT(mutex_owned(rxq->rxq_lock));
   7410 
   7411 	/*
   7412 	 * Initialize the receive descriptor and receive job
   7413 	 * descriptor rings.
   7414 	 */
   7415 	if (sc->sc_type < WM_T_82543) {
   7416 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7417 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7418 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7419 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7420 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7421 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7422 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7423 
   7424 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7425 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7426 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7427 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7428 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7429 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7430 	} else {
   7431 		int qid = wmq->wmq_id;
   7432 
   7433 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7434 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7435 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7436 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7437 
   7438 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7439 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7440 				panic("%s: MCLBYTES %d unsupported for 82575 "
				    "or higher\n", __func__, MCLBYTES);
   7441 
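			/*
			 * Example (assuming SRRCTL_BSIZEPKT_SHIFT is 10,
			 * i.e. 1 KB units): the default MCLBYTES of 2048
			 * programs a 2 KB buffer (2048 >> 10 == 2).
			 */
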
    7442 			/*
			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
			 * supported.
			 */
    7443 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7445 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7446 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7447 			    | RXDCTL_WTHRESH(1));
   7448 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7449 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7450 		} else {
   7451 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7452 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7453 			/* XXX should update with AIM? */
   7454 			CSR_WRITE(sc, WMREG_RDTR,
   7455 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7456 			/* MUST be same */
   7457 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7458 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7459 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7460 		}
   7461 	}
   7462 }
   7463 
   7464 static int
   7465 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7466 {
   7467 	struct wm_rxsoft *rxs;
   7468 	int error, i;
   7469 
   7470 	KASSERT(mutex_owned(rxq->rxq_lock));
   7471 
   7472 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7473 		rxs = &rxq->rxq_soft[i];
   7474 		if (rxs->rxs_mbuf == NULL) {
   7475 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7476 				log(LOG_ERR, "%s: unable to allocate or map "
   7477 				    "rx buffer %d, error = %d\n",
   7478 				    device_xname(sc->sc_dev), i, error);
   7479 				/*
   7480 				 * XXX Should attempt to run with fewer receive
   7481 				 * XXX buffers instead of just failing.
   7482 				 */
   7483 				wm_rxdrain(rxq);
   7484 				return ENOMEM;
   7485 			}
   7486 		} else {
   7487 			/*
   7488 			 * For 82575 and 82576, the RX descriptors must be
   7489 			 * initialized after the setting of RCTL.EN in
   7490 			 * wm_set_filter()
   7491 			 */
   7492 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7493 				wm_init_rxdesc(rxq, i);
   7494 		}
   7495 	}
   7496 	rxq->rxq_ptr = 0;
   7497 	rxq->rxq_discard = 0;
   7498 	WM_RXCHAIN_RESET(rxq);
   7499 
   7500 	return 0;
   7501 }
   7502 
   7503 static int
   7504 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7505     struct wm_rxqueue *rxq)
   7506 {
   7507 
   7508 	KASSERT(mutex_owned(rxq->rxq_lock));
   7509 
   7510 	/*
   7511 	 * Set up some register offsets that are different between
   7512 	 * the i82542 and the i82543 and later chips.
   7513 	 */
   7514 	if (sc->sc_type < WM_T_82543)
   7515 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7516 	else
   7517 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7518 
   7519 	wm_init_rx_regs(sc, wmq, rxq);
   7520 	return wm_init_rx_buffer(sc, rxq);
   7521 }
   7522 
   7523 /*
    7524  * wm_init_txrx_queues:
    7525  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7526  */
   7527 static int
   7528 wm_init_txrx_queues(struct wm_softc *sc)
   7529 {
   7530 	int i, error = 0;
   7531 
   7532 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7533 		device_xname(sc->sc_dev), __func__));
   7534 
   7535 	for (i = 0; i < sc->sc_nqueues; i++) {
   7536 		struct wm_queue *wmq = &sc->sc_queue[i];
   7537 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7538 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7539 
   7540 		/*
   7541 		 * TODO
    7542 		 * Currently, a constant value is used instead of AIM.
    7543 		 * Furthermore, the interrupt interval for multiqueue
    7544 		 * (which uses polling mode) is shorter than the default.
    7545 		 * More tuning and AIM are required.
   7546 		 */
   7547 		if (wm_is_using_multiqueue(sc))
   7548 			wmq->wmq_itr = 50;
   7549 		else
   7550 			wmq->wmq_itr = sc->sc_itr_init;
   7551 		wmq->wmq_set_itr = true;
   7552 
   7553 		mutex_enter(txq->txq_lock);
   7554 		wm_init_tx_queue(sc, wmq, txq);
   7555 		mutex_exit(txq->txq_lock);
   7556 
   7557 		mutex_enter(rxq->rxq_lock);
   7558 		error = wm_init_rx_queue(sc, wmq, rxq);
   7559 		mutex_exit(rxq->rxq_lock);
   7560 		if (error)
   7561 			break;
   7562 	}
   7563 
   7564 	return error;
   7565 }
   7566 
   7567 /*
   7568  * wm_tx_offload:
   7569  *
   7570  *	Set up TCP/IP checksumming parameters for the
   7571  *	specified packet.
   7572  */
   7573 static void
   7574 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7575     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7576 {
   7577 	struct mbuf *m0 = txs->txs_mbuf;
   7578 	struct livengood_tcpip_ctxdesc *t;
   7579 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7580 	uint32_t ipcse;
   7581 	struct ether_header *eh;
   7582 	int offset, iphl;
   7583 	uint8_t fields;
   7584 
   7585 	/*
   7586 	 * XXX It would be nice if the mbuf pkthdr had offset
   7587 	 * fields for the protocol headers.
   7588 	 */
   7589 
   7590 	eh = mtod(m0, struct ether_header *);
   7591 	switch (htons(eh->ether_type)) {
   7592 	case ETHERTYPE_IP:
   7593 	case ETHERTYPE_IPV6:
   7594 		offset = ETHER_HDR_LEN;
   7595 		break;
   7596 
   7597 	case ETHERTYPE_VLAN:
   7598 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7599 		break;
   7600 
   7601 	default:
   7602 		/* Don't support this protocol or encapsulation. */
   7603 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7604 		txq->txq_last_hw_ipcs = 0;
   7605 		txq->txq_last_hw_tucs = 0;
   7606 		*fieldsp = 0;
   7607 		*cmdp = 0;
   7608 		return;
   7609 	}
   7610 
   7611 	if ((m0->m_pkthdr.csum_flags &
   7612 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7613 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7614 	} else
   7615 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7616 
   7617 	ipcse = offset + iphl - 1;
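	/*
	 * E.g., for an untagged IPv4 frame with a 20-byte header:
	 * offset = 14, iphl = 20, so ipcse = 33, the offset of the
	 * last byte of the IP header.
	 */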
   7618 
   7619 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7620 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7621 	seg = 0;
   7622 	fields = 0;
   7623 
   7624 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7625 		int hlen = offset + iphl;
   7626 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7627 
   7628 		if (__predict_false(m0->m_len <
   7629 				    (hlen + sizeof(struct tcphdr)))) {
   7630 			/*
   7631 			 * TCP/IP headers are not in the first mbuf; we need
   7632 			 * to do this the slow and painful way. Let's just
   7633 			 * hope this doesn't happen very often.
   7634 			 */
   7635 			struct tcphdr th;
   7636 
   7637 			WM_Q_EVCNT_INCR(txq, tsopain);
   7638 
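			/*
			 * Zero the IP length field and seed th_sum with
			 * the pseudo-header checksum; the hardware is
			 * expected to fill in the per-segment lengths and
			 * finish the checksums during TSO.
			 */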
   7639 			m_copydata(m0, hlen, sizeof(th), &th);
   7640 			if (v4) {
   7641 				struct ip ip;
   7642 
   7643 				m_copydata(m0, offset, sizeof(ip), &ip);
   7644 				ip.ip_len = 0;
   7645 				m_copyback(m0,
   7646 				    offset + offsetof(struct ip, ip_len),
   7647 				    sizeof(ip.ip_len), &ip.ip_len);
   7648 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7649 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7650 			} else {
   7651 				struct ip6_hdr ip6;
   7652 
   7653 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7654 				ip6.ip6_plen = 0;
   7655 				m_copyback(m0,
   7656 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7657 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7658 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7659 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7660 			}
   7661 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7662 			    sizeof(th.th_sum), &th.th_sum);
   7663 
   7664 			hlen += th.th_off << 2;
   7665 		} else {
   7666 			/*
   7667 			 * TCP/IP headers are in the first mbuf; we can do
   7668 			 * this the easy way.
   7669 			 */
   7670 			struct tcphdr *th;
   7671 
   7672 			if (v4) {
   7673 				struct ip *ip =
   7674 				    (void *)(mtod(m0, char *) + offset);
   7675 				th = (void *)(mtod(m0, char *) + hlen);
   7676 
   7677 				ip->ip_len = 0;
   7678 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7679 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7680 			} else {
   7681 				struct ip6_hdr *ip6 =
   7682 				    (void *)(mtod(m0, char *) + offset);
   7683 				th = (void *)(mtod(m0, char *) + hlen);
   7684 
   7685 				ip6->ip6_plen = 0;
   7686 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7687 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7688 			}
   7689 			hlen += th->th_off << 2;
   7690 		}
   7691 
   7692 		if (v4) {
   7693 			WM_Q_EVCNT_INCR(txq, tso);
   7694 			cmdlen |= WTX_TCPIP_CMD_IP;
   7695 		} else {
   7696 			WM_Q_EVCNT_INCR(txq, tso6);
   7697 			ipcse = 0;
   7698 		}
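		/*
		 * For TSO, cmdlen also carries the TCP payload length
		 * (pkthdr.len - hlen); the header length and MSS go into
		 * the context descriptor's seg field below.
		 */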
   7699 		cmd |= WTX_TCPIP_CMD_TSE;
   7700 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7701 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7702 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7703 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7704 	}
   7705 
   7706 	/*
   7707 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7708 	 * offload feature, if we load the context descriptor, we
   7709 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7710 	 */
   7711 
   7712 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7713 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7714 	    WTX_TCPIP_IPCSE(ipcse);
   7715 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7716 		WM_Q_EVCNT_INCR(txq, ipsum);
   7717 		fields |= WTX_IXSM;
   7718 	}
   7719 
   7720 	offset += iphl;
   7721 
   7722 	if (m0->m_pkthdr.csum_flags &
   7723 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7724 		WM_Q_EVCNT_INCR(txq, tusum);
   7725 		fields |= WTX_TXSM;
   7726 		tucs = WTX_TCPIP_TUCSS(offset) |
   7727 		    WTX_TCPIP_TUCSO(offset +
   7728 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7729 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7730 	} else if ((m0->m_pkthdr.csum_flags &
   7731 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7732 		WM_Q_EVCNT_INCR(txq, tusum6);
   7733 		fields |= WTX_TXSM;
   7734 		tucs = WTX_TCPIP_TUCSS(offset) |
   7735 		    WTX_TCPIP_TUCSO(offset +
   7736 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7737 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7738 	} else {
   7739 		/* Just initialize it to a valid TCP context. */
   7740 		tucs = WTX_TCPIP_TUCSS(offset) |
   7741 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7742 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7743 	}
   7744 
   7745 	*cmdp = cmd;
   7746 	*fieldsp = fields;
   7747 
   7748 	/*
   7749 	 * We don't have to write context descriptor for every packet
   7750 	 * except for 82574. For 82574, we must write context descriptor
   7751 	 * for every packet when we use two descriptor queues.
   7752 	 *
   7753 	 * The 82574L can only remember the *last* context used
    7754 	 * regardless of the queue it was used for.  We cannot reuse
   7755 	 * contexts on this hardware platform and must generate a new
   7756 	 * context every time.  82574L hardware spec, section 7.2.6,
   7757 	 * second note.
   7758 	 */
   7759 	if (sc->sc_nqueues < 2) {
   7760 		/*
    7761 		 * Setting up a new checksum offload context for every
    7762 		 * frame takes a lot of processing time for the hardware.
    7763 		 * This also reduces performance a lot for small-sized
    7764 		 * frames, so avoid it if the driver can reuse a previously
    7765 		 * configured checksum offload context.
    7766 		 * For TSO, in theory we could reuse the same TSO context
    7767 		 * only if the frame has the same type (IP/TCP) and the same
    7768 		 * MSS. However, checking whether a frame has the same
    7769 		 * IP/TCP structure is hard, so just ignore that and always
    7770 		 * re-establish a new TSO context.
   7771 		 */
   7772 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7773 		    == 0) {
   7774 			if (txq->txq_last_hw_cmd == cmd &&
   7775 			    txq->txq_last_hw_fields == fields &&
   7776 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7777 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7778 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7779 				return;
   7780 			}
   7781 		}
   7782 
   7783 		txq->txq_last_hw_cmd = cmd;
   7784 		txq->txq_last_hw_fields = fields;
   7785 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7786 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7787 	}
   7788 
   7789 	/* Fill in the context descriptor. */
   7790 	t = (struct livengood_tcpip_ctxdesc *)
   7791 	    &txq->txq_descs[txq->txq_next];
   7792 	t->tcpip_ipcs = htole32(ipcs);
   7793 	t->tcpip_tucs = htole32(tucs);
   7794 	t->tcpip_cmdlen = htole32(cmdlen);
   7795 	t->tcpip_seg = htole32(seg);
   7796 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7797 
   7798 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7799 	txs->txs_ndesc++;
   7800 }
   7801 
   7802 static inline int
   7803 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7804 {
   7805 	struct wm_softc *sc = ifp->if_softc;
   7806 	u_int cpuid = cpu_index(curcpu());
   7807 
   7808 	/*
    7809 	 * Currently, a simple distribution strategy.
    7810 	 * TODO:
    7811 	 * Distribute by flowid (RSS hash value).
   7812 	 */
   7813 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7814 }
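         /*
          * Worked example (illustrative numbers): with ncpu = 8,
          * sc_affinity_offset = 2 and sc_nqueues = 4, a caller running on
          * CPU 5 selects ((5 + 8 - 2) % 8) % 4 = (11 % 8) % 4 = 3, so its
          * packets go to Tx queue 3.
          */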
   7815 
   7816 static inline bool
   7817 wm_linkdown_discard(struct wm_txqueue *txq)
   7818 {
   7819 
   7820 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7821 		return true;
   7822 
   7823 	return false;
   7824 }
   7825 
   7826 /*
   7827  * wm_start:		[ifnet interface function]
   7828  *
   7829  *	Start packet transmission on the interface.
   7830  */
   7831 static void
   7832 wm_start(struct ifnet *ifp)
   7833 {
   7834 	struct wm_softc *sc = ifp->if_softc;
   7835 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7836 
   7837 #ifdef WM_MPSAFE
   7838 	KASSERT(if_is_mpsafe(ifp));
   7839 #endif
   7840 	/*
   7841 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7842 	 */
   7843 
   7844 	mutex_enter(txq->txq_lock);
   7845 	if (!txq->txq_stopping)
   7846 		wm_start_locked(ifp);
   7847 	mutex_exit(txq->txq_lock);
   7848 }
   7849 
   7850 static void
   7851 wm_start_locked(struct ifnet *ifp)
   7852 {
   7853 	struct wm_softc *sc = ifp->if_softc;
   7854 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7855 
   7856 	wm_send_common_locked(ifp, txq, false);
   7857 }
   7858 
   7859 static int
   7860 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7861 {
   7862 	int qid;
   7863 	struct wm_softc *sc = ifp->if_softc;
   7864 	struct wm_txqueue *txq;
   7865 
   7866 	qid = wm_select_txqueue(ifp, m);
   7867 	txq = &sc->sc_queue[qid].wmq_txq;
   7868 
   7869 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7870 		m_freem(m);
   7871 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7872 		return ENOBUFS;
   7873 	}
   7874 
   7875 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7876 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7877 	if (m->m_flags & M_MCAST)
   7878 		if_statinc_ref(nsr, if_omcasts);
   7879 	IF_STAT_PUTREF(ifp);
   7880 
   7881 	if (mutex_tryenter(txq->txq_lock)) {
   7882 		if (!txq->txq_stopping)
   7883 			wm_transmit_locked(ifp, txq);
   7884 		mutex_exit(txq->txq_lock);
   7885 	}
   7886 
   7887 	return 0;
   7888 }
   7889 
   7890 static void
   7891 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7892 {
   7893 
   7894 	wm_send_common_locked(ifp, txq, true);
   7895 }
   7896 
   7897 static void
   7898 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7899     bool is_transmit)
   7900 {
   7901 	struct wm_softc *sc = ifp->if_softc;
   7902 	struct mbuf *m0;
   7903 	struct wm_txsoft *txs;
   7904 	bus_dmamap_t dmamap;
   7905 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7906 	bus_addr_t curaddr;
   7907 	bus_size_t seglen, curlen;
   7908 	uint32_t cksumcmd;
   7909 	uint8_t cksumfields;
   7910 	bool remap = true;
   7911 
   7912 	KASSERT(mutex_owned(txq->txq_lock));
   7913 
   7914 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7915 		return;
   7916 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7917 		return;
   7918 
   7919 	if (__predict_false(wm_linkdown_discard(txq))) {
   7920 		do {
   7921 			if (is_transmit)
   7922 				m0 = pcq_get(txq->txq_interq);
   7923 			else
   7924 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   7925 			/*
    7926 			 * Increment the sent-packet counter as in the case
    7927 			 * where the packet is discarded by a link-down PHY.
   7928 			 */
   7929 			if (m0 != NULL)
   7930 				if_statinc(ifp, if_opackets);
   7931 			m_freem(m0);
   7932 		} while (m0 != NULL);
   7933 		return;
   7934 	}
   7935 
   7936 	/* Remember the previous number of free descriptors. */
   7937 	ofree = txq->txq_free;
   7938 
   7939 	/*
   7940 	 * Loop through the send queue, setting up transmit descriptors
   7941 	 * until we drain the queue, or use up all available transmit
   7942 	 * descriptors.
   7943 	 */
   7944 	for (;;) {
   7945 		m0 = NULL;
   7946 
   7947 		/* Get a work queue entry. */
   7948 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7949 			wm_txeof(txq, UINT_MAX);
   7950 			if (txq->txq_sfree == 0) {
   7951 				DPRINTF(sc, WM_DEBUG_TX,
   7952 				    ("%s: TX: no free job descriptors\n",
   7953 					device_xname(sc->sc_dev)));
   7954 				WM_Q_EVCNT_INCR(txq, txsstall);
   7955 				break;
   7956 			}
   7957 		}
   7958 
   7959 		/* Grab a packet off the queue. */
   7960 		if (is_transmit)
   7961 			m0 = pcq_get(txq->txq_interq);
   7962 		else
   7963 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7964 		if (m0 == NULL)
   7965 			break;
   7966 
   7967 		DPRINTF(sc, WM_DEBUG_TX,
   7968 		    ("%s: TX: have packet to transmit: %p\n",
   7969 			device_xname(sc->sc_dev), m0));
   7970 
   7971 		txs = &txq->txq_soft[txq->txq_snext];
   7972 		dmamap = txs->txs_dmamap;
   7973 
   7974 		use_tso = (m0->m_pkthdr.csum_flags &
   7975 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7976 
   7977 		/*
   7978 		 * So says the Linux driver:
   7979 		 * The controller does a simple calculation to make sure
   7980 		 * there is enough room in the FIFO before initiating the
   7981 		 * DMA for each buffer. The calc is:
   7982 		 *	4 = ceil(buffer len / MSS)
   7983 		 * To make sure we don't overrun the FIFO, adjust the max
   7984 		 * buffer len if the MSS drops.
   7985 		 */
   7986 		dmamap->dm_maxsegsz =
   7987 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7988 		    ? m0->m_pkthdr.segsz << 2
   7989 		    : WTX_MAX_LEN;
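         		/*
         		 * Illustrative numbers: with an MSS of 536, the map's
         		 * maximum segment size becomes 536 << 2 = 2144 bytes
         		 * (assuming that is below WTX_MAX_LEN), so each DMA
         		 * buffer spans at most ceil(2144 / 536) = 4 MSS-sized
         		 * chunks, matching the FIFO calculation quoted above.
         		 */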
   7990 
   7991 		/*
   7992 		 * Load the DMA map.  If this fails, the packet either
   7993 		 * didn't fit in the allotted number of segments, or we
   7994 		 * were short on resources.  For the too-many-segments
   7995 		 * case, we simply report an error and drop the packet,
   7996 		 * since we can't sanely copy a jumbo packet to a single
   7997 		 * buffer.
   7998 		 */
   7999 retry:
   8000 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8001 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8002 		if (__predict_false(error)) {
   8003 			if (error == EFBIG) {
   8004 				if (remap == true) {
   8005 					struct mbuf *m;
   8006 
   8007 					remap = false;
   8008 					m = m_defrag(m0, M_NOWAIT);
   8009 					if (m != NULL) {
   8010 						WM_Q_EVCNT_INCR(txq, defrag);
   8011 						m0 = m;
   8012 						goto retry;
   8013 					}
   8014 				}
   8015 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8016 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8017 				    "DMA segments, dropping...\n",
   8018 				    device_xname(sc->sc_dev));
   8019 				wm_dump_mbuf_chain(sc, m0);
   8020 				m_freem(m0);
   8021 				continue;
   8022 			}
   8023 			/* Short on resources, just stop for now. */
   8024 			DPRINTF(sc, WM_DEBUG_TX,
   8025 			    ("%s: TX: dmamap load failed: %d\n",
   8026 				device_xname(sc->sc_dev), error));
   8027 			break;
   8028 		}
   8029 
   8030 		segs_needed = dmamap->dm_nsegs;
   8031 		if (use_tso) {
   8032 			/* For sentinel descriptor; see below. */
   8033 			segs_needed++;
   8034 		}
   8035 
   8036 		/*
   8037 		 * Ensure we have enough descriptors free to describe
   8038 		 * the packet. Note, we always reserve one descriptor
   8039 		 * at the end of the ring due to the semantics of the
   8040 		 * TDT register, plus one more in the event we need
   8041 		 * to load offload context.
   8042 		 */
   8043 		if (segs_needed > txq->txq_free - 2) {
   8044 			/*
   8045 			 * Not enough free descriptors to transmit this
   8046 			 * packet.  We haven't committed anything yet,
   8047 			 * so just unload the DMA map, put the packet
    8048 			 * back on the queue, and punt. Notify the upper
   8049 			 * layer that there are no more slots left.
   8050 			 */
   8051 			DPRINTF(sc, WM_DEBUG_TX,
   8052 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8053 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8054 				segs_needed, txq->txq_free - 1));
   8055 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8056 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8057 			WM_Q_EVCNT_INCR(txq, txdstall);
   8058 			break;
   8059 		}
   8060 
   8061 		/*
   8062 		 * Check for 82547 Tx FIFO bug. We need to do this
   8063 		 * once we know we can transmit the packet, since we
   8064 		 * do some internal FIFO space accounting here.
   8065 		 */
   8066 		if (sc->sc_type == WM_T_82547 &&
   8067 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8068 			DPRINTF(sc, WM_DEBUG_TX,
   8069 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8070 				device_xname(sc->sc_dev)));
   8071 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8072 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8073 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8074 			break;
   8075 		}
   8076 
   8077 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8078 
   8079 		DPRINTF(sc, WM_DEBUG_TX,
   8080 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8081 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8082 
   8083 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8084 
   8085 		/*
   8086 		 * Store a pointer to the packet so that we can free it
   8087 		 * later.
   8088 		 *
    8089 		 * Initially, we consider the number of descriptors the
    8090 		 * packet uses to be the number of DMA segments.  This may be
   8091 		 * incremented by 1 if we do checksum offload (a descriptor
   8092 		 * is used to set the checksum context).
   8093 		 */
   8094 		txs->txs_mbuf = m0;
   8095 		txs->txs_firstdesc = txq->txq_next;
   8096 		txs->txs_ndesc = segs_needed;
   8097 
   8098 		/* Set up offload parameters for this packet. */
   8099 		if (m0->m_pkthdr.csum_flags &
   8100 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8101 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8102 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8103 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8104 		} else {
   8105 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8106 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8107 			cksumcmd = 0;
   8108 			cksumfields = 0;
   8109 		}
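         		/*
         		 * No context descriptor is written for a packet without
         		 * offload, so the txq_last_hw_* cache is cleared above
         		 * to force the next offloaded packet to re-establish the
         		 * context instead of trusting a stale cache.
         		 */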
   8110 
   8111 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8112 
   8113 		/* Sync the DMA map. */
   8114 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8115 		    BUS_DMASYNC_PREWRITE);
   8116 
   8117 		/* Initialize the transmit descriptor. */
   8118 		for (nexttx = txq->txq_next, seg = 0;
   8119 		     seg < dmamap->dm_nsegs; seg++) {
   8120 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8121 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8122 			     seglen != 0;
   8123 			     curaddr += curlen, seglen -= curlen,
   8124 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8125 				curlen = seglen;
   8126 
   8127 				/*
   8128 				 * So says the Linux driver:
   8129 				 * Work around for premature descriptor
   8130 				 * write-backs in TSO mode.  Append a
   8131 				 * 4-byte sentinel descriptor.
   8132 				 */
   8133 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8134 				    curlen > 8)
   8135 					curlen -= 4;
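         				/*
         				 * The 4 bytes trimmed from curlen are
         				 * not lost: seglen is only reduced by
         				 * curlen, so the next loop iteration
         				 * emits them as the short sentinel
         				 * descriptor.
         				 */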
   8136 
   8137 				wm_set_dma_addr(
   8138 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8139 				txq->txq_descs[nexttx].wtx_cmdlen
   8140 				    = htole32(cksumcmd | curlen);
   8141 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8142 				    = 0;
   8143 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8144 				    = cksumfields;
   8145 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8146 				lasttx = nexttx;
   8147 
   8148 				DPRINTF(sc, WM_DEBUG_TX,
   8149 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8150 					"len %#04zx\n",
   8151 					device_xname(sc->sc_dev), nexttx,
   8152 					(uint64_t)curaddr, curlen));
   8153 			}
   8154 		}
   8155 
   8156 		KASSERT(lasttx != -1);
   8157 
   8158 		/*
   8159 		 * Set up the command byte on the last descriptor of
   8160 		 * the packet. If we're in the interrupt delay window,
   8161 		 * delay the interrupt.
   8162 		 */
   8163 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8164 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8165 
   8166 		/*
   8167 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8168 		 * up the descriptor to encapsulate the packet for us.
   8169 		 *
   8170 		 * This is only valid on the last descriptor of the packet.
   8171 		 */
   8172 		if (vlan_has_tag(m0)) {
   8173 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8174 			    htole32(WTX_CMD_VLE);
   8175 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8176 			    = htole16(vlan_get_tag(m0));
   8177 		}
   8178 
   8179 		txs->txs_lastdesc = lasttx;
   8180 
   8181 		DPRINTF(sc, WM_DEBUG_TX,
   8182 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8183 			device_xname(sc->sc_dev),
   8184 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8185 
   8186 		/* Sync the descriptors we're using. */
   8187 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8188 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8189 
   8190 		/* Give the packet to the chip. */
   8191 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8192 
   8193 		DPRINTF(sc, WM_DEBUG_TX,
   8194 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8195 
   8196 		DPRINTF(sc, WM_DEBUG_TX,
   8197 		    ("%s: TX: finished transmitting packet, job %d\n",
   8198 			device_xname(sc->sc_dev), txq->txq_snext));
   8199 
   8200 		/* Advance the tx pointer. */
   8201 		txq->txq_free -= txs->txs_ndesc;
   8202 		txq->txq_next = nexttx;
   8203 
   8204 		txq->txq_sfree--;
   8205 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8206 
   8207 		/* Pass the packet to any BPF listeners. */
   8208 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8209 	}
   8210 
   8211 	if (m0 != NULL) {
   8212 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8213 		WM_Q_EVCNT_INCR(txq, descdrop);
   8214 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8215 			__func__));
   8216 		m_freem(m0);
   8217 	}
   8218 
   8219 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8220 		/* No more slots; notify upper layer. */
   8221 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8222 	}
   8223 
   8224 	if (txq->txq_free != ofree) {
   8225 		/* Set a watchdog timer in case the chip flakes out. */
   8226 		txq->txq_lastsent = time_uptime;
   8227 		txq->txq_sending = true;
   8228 	}
   8229 }
   8230 
   8231 /*
   8232  * wm_nq_tx_offload:
   8233  *
   8234  *	Set up TCP/IP checksumming parameters for the
   8235  *	specified packet, for NEWQUEUE devices
   8236  */
   8237 static void
   8238 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8239     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8240 {
   8241 	struct mbuf *m0 = txs->txs_mbuf;
   8242 	uint32_t vl_len, mssidx, cmdc;
   8243 	struct ether_header *eh;
   8244 	int offset, iphl;
   8245 
   8246 	/*
   8247 	 * XXX It would be nice if the mbuf pkthdr had offset
   8248 	 * fields for the protocol headers.
   8249 	 */
   8250 	*cmdlenp = 0;
   8251 	*fieldsp = 0;
   8252 
   8253 	eh = mtod(m0, struct ether_header *);
   8254 	switch (htons(eh->ether_type)) {
   8255 	case ETHERTYPE_IP:
   8256 	case ETHERTYPE_IPV6:
   8257 		offset = ETHER_HDR_LEN;
   8258 		break;
   8259 
   8260 	case ETHERTYPE_VLAN:
   8261 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8262 		break;
   8263 
   8264 	default:
   8265 		/* Don't support this protocol or encapsulation. */
   8266 		*do_csum = false;
   8267 		return;
   8268 	}
   8269 	*do_csum = true;
   8270 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8271 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8272 
   8273 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8274 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8275 
   8276 	if ((m0->m_pkthdr.csum_flags &
   8277 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8278 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8279 	} else {
   8280 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8281 	}
   8282 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8283 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8284 
   8285 	if (vlan_has_tag(m0)) {
   8286 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8287 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8288 		*cmdlenp |= NQTX_CMD_VLE;
   8289 	}
   8290 
   8291 	mssidx = 0;
   8292 
   8293 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8294 		int hlen = offset + iphl;
   8295 		int tcp_hlen;
   8296 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8297 
   8298 		if (__predict_false(m0->m_len <
   8299 				    (hlen + sizeof(struct tcphdr)))) {
   8300 			/*
   8301 			 * TCP/IP headers are not in the first mbuf; we need
   8302 			 * to do this the slow and painful way. Let's just
   8303 			 * hope this doesn't happen very often.
   8304 			 */
   8305 			struct tcphdr th;
   8306 
   8307 			WM_Q_EVCNT_INCR(txq, tsopain);
   8308 
   8309 			m_copydata(m0, hlen, sizeof(th), &th);
   8310 			if (v4) {
   8311 				struct ip ip;
   8312 
   8313 				m_copydata(m0, offset, sizeof(ip), &ip);
   8314 				ip.ip_len = 0;
   8315 				m_copyback(m0,
   8316 				    offset + offsetof(struct ip, ip_len),
   8317 				    sizeof(ip.ip_len), &ip.ip_len);
   8318 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8319 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8320 			} else {
   8321 				struct ip6_hdr ip6;
   8322 
   8323 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8324 				ip6.ip6_plen = 0;
   8325 				m_copyback(m0,
   8326 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8327 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8328 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8329 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8330 			}
   8331 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8332 			    sizeof(th.th_sum), &th.th_sum);
   8333 
   8334 			tcp_hlen = th.th_off << 2;
   8335 		} else {
   8336 			/*
   8337 			 * TCP/IP headers are in the first mbuf; we can do
   8338 			 * this the easy way.
   8339 			 */
   8340 			struct tcphdr *th;
   8341 
   8342 			if (v4) {
   8343 				struct ip *ip =
   8344 				    (void *)(mtod(m0, char *) + offset);
   8345 				th = (void *)(mtod(m0, char *) + hlen);
   8346 
   8347 				ip->ip_len = 0;
   8348 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8349 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8350 			} else {
   8351 				struct ip6_hdr *ip6 =
   8352 				    (void *)(mtod(m0, char *) + offset);
   8353 				th = (void *)(mtod(m0, char *) + hlen);
   8354 
   8355 				ip6->ip6_plen = 0;
   8356 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8357 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8358 			}
   8359 			tcp_hlen = th->th_off << 2;
   8360 		}
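         		/*
         		 * In both paths above, the IP length field is zeroed and
         		 * the TCP checksum is pre-seeded with the pseudo-header
         		 * sum (which excludes the length) because, with TSO, the
         		 * hardware fills in the per-segment lengths and completes
         		 * the checksum for each segment it generates.
         		 */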
   8361 		hlen += tcp_hlen;
   8362 		*cmdlenp |= NQTX_CMD_TSE;
   8363 
   8364 		if (v4) {
   8365 			WM_Q_EVCNT_INCR(txq, tso);
   8366 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8367 		} else {
   8368 			WM_Q_EVCNT_INCR(txq, tso6);
   8369 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8370 		}
   8371 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8372 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8373 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8374 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8375 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8376 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8377 	} else {
   8378 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8379 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8380 	}
   8381 
   8382 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8383 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8384 		cmdc |= NQTXC_CMD_IP4;
   8385 	}
   8386 
   8387 	if (m0->m_pkthdr.csum_flags &
   8388 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8389 		WM_Q_EVCNT_INCR(txq, tusum);
   8390 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8391 			cmdc |= NQTXC_CMD_TCP;
   8392 		else
   8393 			cmdc |= NQTXC_CMD_UDP;
   8394 
   8395 		cmdc |= NQTXC_CMD_IP4;
   8396 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8397 	}
   8398 	if (m0->m_pkthdr.csum_flags &
   8399 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8400 		WM_Q_EVCNT_INCR(txq, tusum6);
   8401 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8402 			cmdc |= NQTXC_CMD_TCP;
   8403 		else
   8404 			cmdc |= NQTXC_CMD_UDP;
   8405 
   8406 		cmdc |= NQTXC_CMD_IP6;
   8407 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8408 	}
   8409 
   8410 	/*
    8411 	 * We don't have to write a context descriptor for every packet on
    8412 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    8413 	 * I210 and I211. Writing one per Tx queue is enough for these
    8414 	 * controllers.
    8415 	 * Writing a context descriptor for every packet is overhead,
    8416 	 * but it does not cause problems.
   8417 	 */
   8418 	/* Fill in the context descriptor. */
   8419 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8420 	    htole32(vl_len);
   8421 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8422 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8423 	    htole32(cmdc);
   8424 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8425 	    htole32(mssidx);
   8426 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8427 	DPRINTF(sc, WM_DEBUG_TX,
   8428 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8429 		txq->txq_next, 0, vl_len));
   8430 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8431 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8432 	txs->txs_ndesc++;
   8433 }
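         /*
          * Illustrative example (hypothetical packet): for an untagged
          * IPv4/TCP packet with standard 14-byte Ethernet and 20-byte IP
          * headers and no TSO, the code above produces
          *   vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
          *          | (20 << NQTXC_VLLEN_IPLEN_SHIFT),
          *   cmdc   = NQTX_DTYP_C | NQTX_CMD_DEXT | NQTXC_CMD_TCP
          *          | NQTXC_CMD_IP4,
          * mssidx = 0, and *fieldsp carries NQTXD_FIELDS_TUXSM plus the
          * packet length in the PAYLEN field.
          */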
   8434 
   8435 /*
   8436  * wm_nq_start:		[ifnet interface function]
   8437  *
   8438  *	Start packet transmission on the interface for NEWQUEUE devices
   8439  */
   8440 static void
   8441 wm_nq_start(struct ifnet *ifp)
   8442 {
   8443 	struct wm_softc *sc = ifp->if_softc;
   8444 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8445 
   8446 #ifdef WM_MPSAFE
   8447 	KASSERT(if_is_mpsafe(ifp));
   8448 #endif
   8449 	/*
   8450 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8451 	 */
   8452 
   8453 	mutex_enter(txq->txq_lock);
   8454 	if (!txq->txq_stopping)
   8455 		wm_nq_start_locked(ifp);
   8456 	mutex_exit(txq->txq_lock);
   8457 }
   8458 
   8459 static void
   8460 wm_nq_start_locked(struct ifnet *ifp)
   8461 {
   8462 	struct wm_softc *sc = ifp->if_softc;
   8463 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8464 
   8465 	wm_nq_send_common_locked(ifp, txq, false);
   8466 }
   8467 
   8468 static int
   8469 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8470 {
   8471 	int qid;
   8472 	struct wm_softc *sc = ifp->if_softc;
   8473 	struct wm_txqueue *txq;
   8474 
   8475 	qid = wm_select_txqueue(ifp, m);
   8476 	txq = &sc->sc_queue[qid].wmq_txq;
   8477 
   8478 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8479 		m_freem(m);
   8480 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8481 		return ENOBUFS;
   8482 	}
   8483 
   8484 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8485 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8486 	if (m->m_flags & M_MCAST)
   8487 		if_statinc_ref(nsr, if_omcasts);
   8488 	IF_STAT_PUTREF(ifp);
   8489 
   8490 	/*
    8491 	 * This mutex_tryenter() can fail at run time in two situations:
    8492 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8493 	 *     (2) contention with the deferred if_start softint
    8494 	 *         (wm_handle_queue())
    8495 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8496 	 * dequeued by wm_deferred_start_locked(), so it does not get
    8497 	 * stuck.
    8498 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    8499 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
   8500 	 */
   8501 	if (mutex_tryenter(txq->txq_lock)) {
   8502 		if (!txq->txq_stopping)
   8503 			wm_nq_transmit_locked(ifp, txq);
   8504 		mutex_exit(txq->txq_lock);
   8505 	}
   8506 
   8507 	return 0;
   8508 }
   8509 
   8510 static void
   8511 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8512 {
   8513 
   8514 	wm_nq_send_common_locked(ifp, txq, true);
   8515 }
   8516 
   8517 static void
   8518 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8519     bool is_transmit)
   8520 {
   8521 	struct wm_softc *sc = ifp->if_softc;
   8522 	struct mbuf *m0;
   8523 	struct wm_txsoft *txs;
   8524 	bus_dmamap_t dmamap;
   8525 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8526 	bool do_csum, sent;
   8527 	bool remap = true;
   8528 
   8529 	KASSERT(mutex_owned(txq->txq_lock));
   8530 
   8531 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8532 		return;
   8533 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8534 		return;
   8535 
   8536 	if (__predict_false(wm_linkdown_discard(txq))) {
   8537 		do {
   8538 			if (is_transmit)
   8539 				m0 = pcq_get(txq->txq_interq);
   8540 			else
   8541 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8542 			/*
    8543 			 * Increment the sent-packet counter as in the case
    8544 			 * where the packet is discarded by a link-down PHY.
   8545 			 */
   8546 			if (m0 != NULL)
   8547 				if_statinc(ifp, if_opackets);
   8548 			m_freem(m0);
   8549 		} while (m0 != NULL);
   8550 		return;
   8551 	}
   8552 
   8553 	sent = false;
   8554 
   8555 	/*
   8556 	 * Loop through the send queue, setting up transmit descriptors
   8557 	 * until we drain the queue, or use up all available transmit
   8558 	 * descriptors.
   8559 	 */
   8560 	for (;;) {
   8561 		m0 = NULL;
   8562 
   8563 		/* Get a work queue entry. */
   8564 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8565 			wm_txeof(txq, UINT_MAX);
   8566 			if (txq->txq_sfree == 0) {
   8567 				DPRINTF(sc, WM_DEBUG_TX,
   8568 				    ("%s: TX: no free job descriptors\n",
   8569 					device_xname(sc->sc_dev)));
   8570 				WM_Q_EVCNT_INCR(txq, txsstall);
   8571 				break;
   8572 			}
   8573 		}
   8574 
   8575 		/* Grab a packet off the queue. */
   8576 		if (is_transmit)
   8577 			m0 = pcq_get(txq->txq_interq);
   8578 		else
   8579 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8580 		if (m0 == NULL)
   8581 			break;
   8582 
   8583 		DPRINTF(sc, WM_DEBUG_TX,
   8584 		    ("%s: TX: have packet to transmit: %p\n",
   8585 		    device_xname(sc->sc_dev), m0));
   8586 
   8587 		txs = &txq->txq_soft[txq->txq_snext];
   8588 		dmamap = txs->txs_dmamap;
   8589 
   8590 		/*
   8591 		 * Load the DMA map.  If this fails, the packet either
   8592 		 * didn't fit in the allotted number of segments, or we
   8593 		 * were short on resources.  For the too-many-segments
   8594 		 * case, we simply report an error and drop the packet,
   8595 		 * since we can't sanely copy a jumbo packet to a single
   8596 		 * buffer.
   8597 		 */
   8598 retry:
   8599 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8600 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8601 		if (__predict_false(error)) {
   8602 			if (error == EFBIG) {
   8603 				if (remap == true) {
   8604 					struct mbuf *m;
   8605 
   8606 					remap = false;
   8607 					m = m_defrag(m0, M_NOWAIT);
   8608 					if (m != NULL) {
   8609 						WM_Q_EVCNT_INCR(txq, defrag);
   8610 						m0 = m;
   8611 						goto retry;
   8612 					}
   8613 				}
   8614 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8615 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8616 				    "DMA segments, dropping...\n",
   8617 				    device_xname(sc->sc_dev));
   8618 				wm_dump_mbuf_chain(sc, m0);
   8619 				m_freem(m0);
   8620 				continue;
   8621 			}
   8622 			/* Short on resources, just stop for now. */
   8623 			DPRINTF(sc, WM_DEBUG_TX,
   8624 			    ("%s: TX: dmamap load failed: %d\n",
   8625 				device_xname(sc->sc_dev), error));
   8626 			break;
   8627 		}
   8628 
   8629 		segs_needed = dmamap->dm_nsegs;
   8630 
   8631 		/*
   8632 		 * Ensure we have enough descriptors free to describe
   8633 		 * the packet. Note, we always reserve one descriptor
   8634 		 * at the end of the ring due to the semantics of the
   8635 		 * TDT register, plus one more in the event we need
   8636 		 * to load offload context.
   8637 		 */
   8638 		if (segs_needed > txq->txq_free - 2) {
   8639 			/*
   8640 			 * Not enough free descriptors to transmit this
   8641 			 * packet.  We haven't committed anything yet,
   8642 			 * so just unload the DMA map, put the packet
    8643 			 * back on the queue, and punt. Notify the upper
   8644 			 * layer that there are no more slots left.
   8645 			 */
   8646 			DPRINTF(sc, WM_DEBUG_TX,
   8647 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8648 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8649 				segs_needed, txq->txq_free - 1));
   8650 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8651 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8652 			WM_Q_EVCNT_INCR(txq, txdstall);
   8653 			break;
   8654 		}
   8655 
   8656 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8657 
   8658 		DPRINTF(sc, WM_DEBUG_TX,
   8659 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8660 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8661 
   8662 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8663 
   8664 		/*
   8665 		 * Store a pointer to the packet so that we can free it
   8666 		 * later.
   8667 		 *
    8668 		 * Initially, we consider the number of descriptors the
    8669 		 * packet uses to be the number of DMA segments.  This may be
   8670 		 * incremented by 1 if we do checksum offload (a descriptor
   8671 		 * is used to set the checksum context).
   8672 		 */
   8673 		txs->txs_mbuf = m0;
   8674 		txs->txs_firstdesc = txq->txq_next;
   8675 		txs->txs_ndesc = segs_needed;
   8676 
   8677 		/* Set up offload parameters for this packet. */
   8678 		uint32_t cmdlen, fields, dcmdlen;
   8679 		if (m0->m_pkthdr.csum_flags &
   8680 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8681 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8682 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8683 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8684 			    &do_csum);
   8685 		} else {
   8686 			do_csum = false;
   8687 			cmdlen = 0;
   8688 			fields = 0;
   8689 		}
   8690 
   8691 		/* Sync the DMA map. */
   8692 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8693 		    BUS_DMASYNC_PREWRITE);
   8694 
   8695 		/* Initialize the first transmit descriptor. */
   8696 		nexttx = txq->txq_next;
   8697 		if (!do_csum) {
   8698 			/* Setup a legacy descriptor */
   8699 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8700 			    dmamap->dm_segs[0].ds_addr);
   8701 			txq->txq_descs[nexttx].wtx_cmdlen =
   8702 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8703 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8704 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8705 			if (vlan_has_tag(m0)) {
   8706 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8707 				    htole32(WTX_CMD_VLE);
   8708 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8709 				    htole16(vlan_get_tag(m0));
   8710 			} else
   8711 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8712 
   8713 			dcmdlen = 0;
   8714 		} else {
   8715 			/* Setup an advanced data descriptor */
   8716 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8717 			    htole64(dmamap->dm_segs[0].ds_addr);
   8718 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8719 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8720 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8721 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8722 			    htole32(fields);
   8723 			DPRINTF(sc, WM_DEBUG_TX,
   8724 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8725 				device_xname(sc->sc_dev), nexttx,
   8726 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8727 			DPRINTF(sc, WM_DEBUG_TX,
   8728 			    ("\t 0x%08x%08x\n", fields,
   8729 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8730 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8731 		}
   8732 
   8733 		lasttx = nexttx;
   8734 		nexttx = WM_NEXTTX(txq, nexttx);
   8735 		/*
    8736 		 * Fill in the next descriptors. The legacy and advanced
    8737 		 * formats are the same from here on.
   8738 		 */
   8739 		for (seg = 1; seg < dmamap->dm_nsegs;
   8740 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8741 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8742 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8743 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8744 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8745 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8746 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8747 			lasttx = nexttx;
   8748 
   8749 			DPRINTF(sc, WM_DEBUG_TX,
   8750 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8751 				device_xname(sc->sc_dev), nexttx,
   8752 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8753 				dmamap->dm_segs[seg].ds_len));
   8754 		}
   8755 
   8756 		KASSERT(lasttx != -1);
   8757 
   8758 		/*
   8759 		 * Set up the command byte on the last descriptor of
   8760 		 * the packet. If we're in the interrupt delay window,
   8761 		 * delay the interrupt.
   8762 		 */
   8763 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8764 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8765 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8766 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
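         		/*
         		 * Writing the legacy WTX_CMD_* names through wtx_cmdlen
         		 * is safe for advanced descriptors as well: the KASSERT
         		 * above documents that EOP and RS occupy the same bit
         		 * positions in both descriptor formats.
         		 */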
   8767 
   8768 		txs->txs_lastdesc = lasttx;
   8769 
   8770 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8771 		    device_xname(sc->sc_dev),
   8772 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8773 
   8774 		/* Sync the descriptors we're using. */
   8775 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8776 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8777 
   8778 		/* Give the packet to the chip. */
   8779 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8780 		sent = true;
   8781 
   8782 		DPRINTF(sc, WM_DEBUG_TX,
   8783 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8784 
   8785 		DPRINTF(sc, WM_DEBUG_TX,
   8786 		    ("%s: TX: finished transmitting packet, job %d\n",
   8787 			device_xname(sc->sc_dev), txq->txq_snext));
   8788 
   8789 		/* Advance the tx pointer. */
   8790 		txq->txq_free -= txs->txs_ndesc;
   8791 		txq->txq_next = nexttx;
   8792 
   8793 		txq->txq_sfree--;
   8794 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8795 
   8796 		/* Pass the packet to any BPF listeners. */
   8797 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8798 	}
   8799 
   8800 	if (m0 != NULL) {
   8801 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8802 		WM_Q_EVCNT_INCR(txq, descdrop);
   8803 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8804 			__func__));
   8805 		m_freem(m0);
   8806 	}
   8807 
   8808 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8809 		/* No more slots; notify upper layer. */
   8810 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8811 	}
   8812 
   8813 	if (sent) {
   8814 		/* Set a watchdog timer in case the chip flakes out. */
   8815 		txq->txq_lastsent = time_uptime;
   8816 		txq->txq_sending = true;
   8817 	}
   8818 }
   8819 
   8820 static void
   8821 wm_deferred_start_locked(struct wm_txqueue *txq)
   8822 {
   8823 	struct wm_softc *sc = txq->txq_sc;
   8824 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8825 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8826 	int qid = wmq->wmq_id;
   8827 
   8828 	KASSERT(mutex_owned(txq->txq_lock));
   8829 
    8830 	/* The caller holds and releases txq_lock; don't drop it here. */
    8831 	if (txq->txq_stopping)
    8832 		return;
   8834 
   8835 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8836 		/* XXX Needed for ALTQ or a single-CPU system */
   8837 		if (qid == 0)
   8838 			wm_nq_start_locked(ifp);
   8839 		wm_nq_transmit_locked(ifp, txq);
   8840 	} else {
    8841 		/* XXX Needed for ALTQ or a single-CPU system */
   8842 		if (qid == 0)
   8843 			wm_start_locked(ifp);
   8844 		wm_transmit_locked(ifp, txq);
   8845 	}
   8846 }
   8847 
   8848 /* Interrupt */
   8849 
   8850 /*
   8851  * wm_txeof:
   8852  *
   8853  *	Helper; handle transmit interrupts.
   8854  */
   8855 static bool
   8856 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8857 {
   8858 	struct wm_softc *sc = txq->txq_sc;
   8859 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8860 	struct wm_txsoft *txs;
   8861 	int count = 0;
   8862 	int i;
   8863 	uint8_t status;
   8864 	bool more = false;
   8865 
   8866 	KASSERT(mutex_owned(txq->txq_lock));
   8867 
   8868 	if (txq->txq_stopping)
   8869 		return false;
   8870 
   8871 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8872 
   8873 	/*
   8874 	 * Go through the Tx list and free mbufs for those
   8875 	 * frames which have been transmitted.
   8876 	 */
   8877 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8878 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8879 		if (limit-- == 0) {
   8880 			more = true;
   8881 			DPRINTF(sc, WM_DEBUG_TX,
   8882 			    ("%s: TX: loop limited, job %d is not processed\n",
   8883 				device_xname(sc->sc_dev), i));
   8884 			break;
   8885 		}
   8886 
   8887 		txs = &txq->txq_soft[i];
   8888 
   8889 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8890 			device_xname(sc->sc_dev), i));
   8891 
   8892 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8893 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8894 
   8895 		status =
   8896 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8897 		if ((status & WTX_ST_DD) == 0) {
   8898 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8899 			    BUS_DMASYNC_PREREAD);
   8900 			break;
   8901 		}
   8902 
   8903 		count++;
   8904 		DPRINTF(sc, WM_DEBUG_TX,
   8905 		    ("%s: TX: job %d done: descs %d..%d\n",
   8906 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8907 		    txs->txs_lastdesc));
   8908 
   8909 		/*
   8910 		 * XXX We should probably be using the statistics
   8911 		 * XXX registers, but I don't know if they exist
   8912 		 * XXX on chips before the i82544.
   8913 		 */
   8914 
   8915 #ifdef WM_EVENT_COUNTERS
   8916 		if (status & WTX_ST_TU)
   8917 			WM_Q_EVCNT_INCR(txq, underrun);
   8918 #endif /* WM_EVENT_COUNTERS */
   8919 
   8920 		/*
    8921 		 * The documents for the 82574 and newer say the status
    8922 		 * field has neither an EC (Excessive Collision) bit nor an
    8923 		 * LC (Late Collision) bit (both are reserved). Refer to the
    8924 		 * "PCIe GbE Controller Open Source Software Developer's
    8925 		 * Manual", the 82574 datasheet and newer ones.
    8926 		 *
    8927 		 * XXX I saw the LC bit set on I218 even though the media
    8928 		 * was full duplex; it may have another, undocumented meaning.
   8929 		 */
   8930 
   8931 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8932 		    && ((sc->sc_type < WM_T_82574)
   8933 			|| (sc->sc_type == WM_T_80003))) {
   8934 			if_statinc(ifp, if_oerrors);
   8935 			if (status & WTX_ST_LC)
   8936 				log(LOG_WARNING, "%s: late collision\n",
   8937 				    device_xname(sc->sc_dev));
   8938 			else if (status & WTX_ST_EC) {
   8939 				if_statadd(ifp, if_collisions,
   8940 				    TX_COLLISION_THRESHOLD + 1);
   8941 				log(LOG_WARNING, "%s: excessive collisions\n",
   8942 				    device_xname(sc->sc_dev));
   8943 			}
   8944 		} else
   8945 			if_statinc(ifp, if_opackets);
   8946 
   8947 		txq->txq_packets++;
   8948 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8949 
   8950 		txq->txq_free += txs->txs_ndesc;
   8951 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8952 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8953 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8954 		m_freem(txs->txs_mbuf);
   8955 		txs->txs_mbuf = NULL;
   8956 	}
   8957 
   8958 	/* Update the dirty transmit buffer pointer. */
   8959 	txq->txq_sdirty = i;
   8960 	DPRINTF(sc, WM_DEBUG_TX,
   8961 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8962 
   8963 	if (count != 0)
   8964 		rnd_add_uint32(&sc->rnd_source, count);
   8965 
   8966 	/*
   8967 	 * If there are no more pending transmissions, cancel the watchdog
   8968 	 * timer.
   8969 	 */
   8970 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8971 		txq->txq_sending = false;
   8972 
   8973 	return more;
   8974 }
   8975 
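         /*
          * The wm_rxdesc_get_*() helpers below hide the three Rx descriptor
          * layouts from the rest of the driver: the 82574 uses the extended
          * format (erx_ctx), NEWQUEUE controllers use the advanced format
          * (nqrx_ctx), and all other chips use the legacy format.
          */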
   8976 static inline uint32_t
   8977 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8978 {
   8979 	struct wm_softc *sc = rxq->rxq_sc;
   8980 
   8981 	if (sc->sc_type == WM_T_82574)
   8982 		return EXTRXC_STATUS(
   8983 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8984 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8985 		return NQRXC_STATUS(
   8986 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8987 	else
   8988 		return rxq->rxq_descs[idx].wrx_status;
   8989 }
   8990 
   8991 static inline uint32_t
   8992 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8993 {
   8994 	struct wm_softc *sc = rxq->rxq_sc;
   8995 
   8996 	if (sc->sc_type == WM_T_82574)
   8997 		return EXTRXC_ERROR(
   8998 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8999 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9000 		return NQRXC_ERROR(
   9001 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9002 	else
   9003 		return rxq->rxq_descs[idx].wrx_errors;
   9004 }
   9005 
   9006 static inline uint16_t
   9007 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9008 {
   9009 	struct wm_softc *sc = rxq->rxq_sc;
   9010 
   9011 	if (sc->sc_type == WM_T_82574)
   9012 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9013 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9014 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9015 	else
   9016 		return rxq->rxq_descs[idx].wrx_special;
   9017 }
   9018 
   9019 static inline int
   9020 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9021 {
   9022 	struct wm_softc *sc = rxq->rxq_sc;
   9023 
   9024 	if (sc->sc_type == WM_T_82574)
   9025 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9026 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9027 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9028 	else
   9029 		return rxq->rxq_descs[idx].wrx_len;
   9030 }
   9031 
   9032 #ifdef WM_DEBUG
   9033 static inline uint32_t
   9034 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9035 {
   9036 	struct wm_softc *sc = rxq->rxq_sc;
   9037 
   9038 	if (sc->sc_type == WM_T_82574)
   9039 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9040 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9041 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9042 	else
   9043 		return 0;
   9044 }
   9045 
   9046 static inline uint8_t
   9047 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9048 {
   9049 	struct wm_softc *sc = rxq->rxq_sc;
   9050 
   9051 	if (sc->sc_type == WM_T_82574)
   9052 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9053 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9054 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9055 	else
   9056 		return 0;
   9057 }
   9058 #endif /* WM_DEBUG */
   9059 
   9060 static inline bool
   9061 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9062     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9063 {
   9064 
   9065 	if (sc->sc_type == WM_T_82574)
   9066 		return (status & ext_bit) != 0;
   9067 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9068 		return (status & nq_bit) != 0;
   9069 	else
   9070 		return (status & legacy_bit) != 0;
   9071 }
   9072 
   9073 static inline bool
   9074 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9075     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9076 {
   9077 
   9078 	if (sc->sc_type == WM_T_82574)
   9079 		return (error & ext_bit) != 0;
   9080 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9081 		return (error & nq_bit) != 0;
   9082 	else
   9083 		return (error & legacy_bit) != 0;
   9084 }
   9085 
   9086 static inline bool
   9087 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9088 {
   9089 
   9090 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9091 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9092 		return true;
   9093 	else
   9094 		return false;
   9095 }
   9096 
   9097 static inline bool
   9098 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9099 {
   9100 	struct wm_softc *sc = rxq->rxq_sc;
   9101 
   9102 	/* XXX missing error bit for newqueue? */
   9103 	if (wm_rxdesc_is_set_error(sc, errors,
   9104 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9105 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9106 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9107 		NQRXC_ERROR_RXE)) {
   9108 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9109 		    EXTRXC_ERROR_SE, 0))
   9110 			log(LOG_WARNING, "%s: symbol error\n",
   9111 			    device_xname(sc->sc_dev));
   9112 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9113 		    EXTRXC_ERROR_SEQ, 0))
   9114 			log(LOG_WARNING, "%s: receive sequence error\n",
   9115 			    device_xname(sc->sc_dev));
   9116 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9117 		    EXTRXC_ERROR_CE, 0))
   9118 			log(LOG_WARNING, "%s: CRC error\n",
   9119 			    device_xname(sc->sc_dev));
   9120 		return true;
   9121 	}
   9122 
   9123 	return false;
   9124 }
   9125 
   9126 static inline bool
   9127 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9128 {
   9129 	struct wm_softc *sc = rxq->rxq_sc;
   9130 
   9131 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9132 		NQRXC_STATUS_DD)) {
   9133 		/* We have processed all of the receive descriptors. */
   9134 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9135 		return false;
   9136 	}
   9137 
   9138 	return true;
   9139 }
   9140 
   9141 static inline bool
   9142 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9143     uint16_t vlantag, struct mbuf *m)
   9144 {
   9145 
   9146 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9147 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9148 		vlan_set_tag(m, le16toh(vlantag));
   9149 	}
   9150 
   9151 	return true;
   9152 }
   9153 
   9154 static inline void
   9155 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9156     uint32_t errors, struct mbuf *m)
   9157 {
   9158 	struct wm_softc *sc = rxq->rxq_sc;
   9159 
   9160 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9161 		if (wm_rxdesc_is_set_status(sc, status,
   9162 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9163 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9164 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9165 			if (wm_rxdesc_is_set_error(sc, errors,
   9166 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9167 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9168 		}
   9169 		if (wm_rxdesc_is_set_status(sc, status,
   9170 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9171 			/*
   9172 			 * Note: we don't know if this was TCP or UDP,
   9173 			 * so we just set both bits, and expect the
   9174 			 * upper layers to deal.
   9175 			 */
   9176 			WM_Q_EVCNT_INCR(rxq, tusum);
   9177 			m->m_pkthdr.csum_flags |=
   9178 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9179 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9180 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9181 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9182 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9183 		}
   9184 	}
   9185 }
   9186 
   9187 /*
   9188  * wm_rxeof:
   9189  *
   9190  *	Helper; handle receive interrupts.
   9191  */
   9192 static bool
   9193 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9194 {
   9195 	struct wm_softc *sc = rxq->rxq_sc;
   9196 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9197 	struct wm_rxsoft *rxs;
   9198 	struct mbuf *m;
   9199 	int i, len;
   9200 	int count = 0;
   9201 	uint32_t status, errors;
   9202 	uint16_t vlantag;
   9203 	bool more = false;
   9204 
   9205 	KASSERT(mutex_owned(rxq->rxq_lock));
   9206 
   9207 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9208 		if (limit-- == 0) {
   9209 			more = true;
   9210 			DPRINTF(sc, WM_DEBUG_RX,
   9211 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9212 				device_xname(sc->sc_dev), i));
   9213 			break;
   9214 		}
   9215 
   9216 		rxs = &rxq->rxq_soft[i];
   9217 
   9218 		DPRINTF(sc, WM_DEBUG_RX,
   9219 		    ("%s: RX: checking descriptor %d\n",
   9220 			device_xname(sc->sc_dev), i));
   9221 		wm_cdrxsync(rxq, i,
   9222 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9223 
   9224 		status = wm_rxdesc_get_status(rxq, i);
   9225 		errors = wm_rxdesc_get_errors(rxq, i);
   9226 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9227 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9228 #ifdef WM_DEBUG
   9229 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9230 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9231 #endif
   9232 
   9233 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9234 			break;
   9235 		}
   9236 
   9237 		count++;
   9238 		if (__predict_false(rxq->rxq_discard)) {
   9239 			DPRINTF(sc, WM_DEBUG_RX,
   9240 			    ("%s: RX: discarding contents of descriptor %d\n",
   9241 				device_xname(sc->sc_dev), i));
   9242 			wm_init_rxdesc(rxq, i);
   9243 			if (wm_rxdesc_is_eop(rxq, status)) {
   9244 				/* Reset our state. */
   9245 				DPRINTF(sc, WM_DEBUG_RX,
   9246 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9247 					device_xname(sc->sc_dev)));
   9248 				rxq->rxq_discard = 0;
   9249 			}
   9250 			continue;
   9251 		}
   9252 
   9253 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9254 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9255 
   9256 		m = rxs->rxs_mbuf;
   9257 
   9258 		/*
   9259 		 * Add a new receive buffer to the ring, unless of
   9260 		 * course the length is zero. Treat the latter as a
   9261 		 * failed mapping.
   9262 		 */
   9263 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9264 			/*
   9265 			 * Failed, throw away what we've done so
   9266 			 * far, and discard the rest of the packet.
   9267 			 */
   9268 			if_statinc(ifp, if_ierrors);
   9269 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9270 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9271 			wm_init_rxdesc(rxq, i);
   9272 			if (!wm_rxdesc_is_eop(rxq, status))
   9273 				rxq->rxq_discard = 1;
   9274 			if (rxq->rxq_head != NULL)
   9275 				m_freem(rxq->rxq_head);
   9276 			WM_RXCHAIN_RESET(rxq);
   9277 			DPRINTF(sc, WM_DEBUG_RX,
   9278 			    ("%s: RX: Rx buffer allocation failed, "
   9279 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9280 				rxq->rxq_discard ? " (discard)" : ""));
   9281 			continue;
   9282 		}
   9283 
   9284 		m->m_len = len;
   9285 		rxq->rxq_len += len;
   9286 		DPRINTF(sc, WM_DEBUG_RX,
   9287 		    ("%s: RX: buffer at %p len %d\n",
   9288 			device_xname(sc->sc_dev), m->m_data, len));
   9289 
   9290 		/* If this is not the end of the packet, keep looking. */
   9291 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9292 			WM_RXCHAIN_LINK(rxq, m);
   9293 			DPRINTF(sc, WM_DEBUG_RX,
   9294 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9295 				device_xname(sc->sc_dev), rxq->rxq_len));
   9296 			continue;
   9297 		}
   9298 
   9299 		/*
    9300 		 * Okay, we have the entire packet now. The chip is
    9301 		 * configured to include the FCS except on I35[04] and
    9302 		 * I21[01] (not all chips can be configured to strip it),
    9303 		 * so we need to trim it. Those chips have an erratum: the
    9304 		 * RCTL_SECRC bit in the RCTL register is always set, so we
    9305 		 * don't trim the FCS for them. PCH2 and newer chips also
    9306 		 * don't include the FCS with jumbo frames, to work around
    9307 		 * an erratum. We may need to adjust the length of the
    9308 		 * previous mbuf in the chain if the current mbuf is too short.
   9309 		 */
   9310 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9311 			if (m->m_len < ETHER_CRC_LEN) {
   9312 				rxq->rxq_tail->m_len
   9313 				    -= (ETHER_CRC_LEN - m->m_len);
   9314 				m->m_len = 0;
   9315 			} else
   9316 				m->m_len -= ETHER_CRC_LEN;
   9317 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9318 		} else
   9319 			len = rxq->rxq_len;
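         		/*
         		 * Example (illustrative): if the last mbuf of the chain
         		 * holds only 2 bytes, both are FCS bytes; the remaining
         		 * ETHER_CRC_LEN - 2 = 2 FCS bytes are then trimmed from
         		 * the previous mbuf (rxq_tail) and m->m_len becomes 0.
         		 */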
   9320 
   9321 		WM_RXCHAIN_LINK(rxq, m);
   9322 
   9323 		*rxq->rxq_tailp = NULL;
   9324 		m = rxq->rxq_head;
   9325 
   9326 		WM_RXCHAIN_RESET(rxq);
   9327 
   9328 		DPRINTF(sc, WM_DEBUG_RX,
   9329 		    ("%s: RX: have entire packet, len -> %d\n",
   9330 			device_xname(sc->sc_dev), len));
   9331 
   9332 		/* If an error occurred, update stats and drop the packet. */
   9333 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9334 			m_freem(m);
   9335 			continue;
   9336 		}
   9337 
   9338 		/* No errors.  Receive the packet. */
   9339 		m_set_rcvif(m, ifp);
   9340 		m->m_pkthdr.len = len;
   9341 		/*
   9342 		 * TODO
    9343 		 * We should save rsshash and rsstype in this mbuf.
   9344 		 */
   9345 		DPRINTF(sc, WM_DEBUG_RX,
   9346 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9347 			device_xname(sc->sc_dev), rsstype, rsshash));
   9348 
   9349 		/*
   9350 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9351 		 * for us.  Associate the tag with the packet.
   9352 		 */
   9353 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9354 			continue;
   9355 
   9356 		/* Set up checksum info for this packet. */
   9357 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9358 
   9359 		rxq->rxq_packets++;
   9360 		rxq->rxq_bytes += len;
   9361 		/* Pass it on. */
   9362 		if_percpuq_enqueue(sc->sc_ipq, m);
   9363 
   9364 		if (rxq->rxq_stopping)
   9365 			break;
   9366 	}
   9367 	rxq->rxq_ptr = i;
   9368 
   9369 	if (count != 0)
   9370 		rnd_add_uint32(&sc->rnd_source, count);
   9371 
   9372 	DPRINTF(sc, WM_DEBUG_RX,
   9373 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9374 
   9375 	return more;
   9376 }
   9377 
   9378 /*
   9379  * wm_linkintr_gmii:
   9380  *
   9381  *	Helper; handle link interrupts for GMII.
   9382  */
   9383 static void
   9384 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9385 {
   9386 	device_t dev = sc->sc_dev;
   9387 	uint32_t status, reg;
   9388 	bool link;
   9389 	int rv;
   9390 
   9391 	KASSERT(WM_CORE_LOCKED(sc));
   9392 
   9393 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9394 		__func__));
   9395 
   9396 	if ((icr & ICR_LSC) == 0) {
   9397 		if (icr & ICR_RXSEQ)
   9398 			DPRINTF(sc, WM_DEBUG_LINK,
   9399 			    ("%s: LINK Receive sequence error\n",
   9400 				device_xname(dev)));
   9401 		return;
   9402 	}
   9403 
   9404 	/* Link status changed */
   9405 	status = CSR_READ(sc, WMREG_STATUS);
   9406 	link = status & STATUS_LU;
   9407 	if (link) {
   9408 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9409 			device_xname(dev),
   9410 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9411 		if (wm_phy_need_linkdown_discard(sc))
   9412 			wm_clear_linkdown_discard(sc);
   9413 	} else {
   9414 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9415 			device_xname(dev)));
   9416 		if (wm_phy_need_linkdown_discard(sc))
   9417 			wm_set_linkdown_discard(sc);
   9418 	}
   9419 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9420 		wm_gig_downshift_workaround_ich8lan(sc);
   9421 
   9422 	if ((sc->sc_type == WM_T_ICH8)
   9423 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9424 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9425 	}
   9426 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9427 		device_xname(dev)));
   9428 	mii_pollstat(&sc->sc_mii);
   9429 	if (sc->sc_type == WM_T_82543) {
   9430 		int miistatus, active;
   9431 
   9432 		/*
    9433 		 * With the 82543, we need to force the MAC's speed
    9434 		 * and duplex to match the PHY's speed and duplex
    9435 		 * configuration.
   9436 		 */
   9437 		miistatus = sc->sc_mii.mii_media_status;
   9438 
   9439 		if (miistatus & IFM_ACTIVE) {
   9440 			active = sc->sc_mii.mii_media_active;
   9441 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9442 			switch (IFM_SUBTYPE(active)) {
   9443 			case IFM_10_T:
   9444 				sc->sc_ctrl |= CTRL_SPEED_10;
   9445 				break;
   9446 			case IFM_100_TX:
   9447 				sc->sc_ctrl |= CTRL_SPEED_100;
   9448 				break;
   9449 			case IFM_1000_T:
   9450 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9451 				break;
   9452 			default:
   9453 				/*
   9454 				 * Fiber?
    9455 				 * Should not enter here.
   9456 				 */
   9457 				device_printf(dev, "unknown media (%x)\n",
   9458 				    active);
   9459 				break;
   9460 			}
   9461 			if (active & IFM_FDX)
   9462 				sc->sc_ctrl |= CTRL_FD;
   9463 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9464 		}
   9465 	} else if (sc->sc_type == WM_T_PCH) {
   9466 		wm_k1_gig_workaround_hv(sc,
   9467 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9468 	}
   9469 
   9470 	/*
   9471 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9472 	 * aggressive resulting in many collisions. To avoid this, increase
   9473 	 * the IPG and reduce Rx latency in the PHY.
   9474 	 */
   9475 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9476 	    && link) {
   9477 		uint32_t tipg_reg;
   9478 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9479 		bool fdx;
   9480 		uint16_t emi_addr, emi_val;
   9481 
   9482 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9483 		tipg_reg &= ~TIPG_IPGT_MASK;
   9484 		fdx = status & STATUS_FD;
   9485 
   9486 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9487 			tipg_reg |= 0xff;
   9488 			/* Reduce Rx latency in analog PHY */
   9489 			emi_val = 0;
   9490 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9491 		    fdx && speed != STATUS_SPEED_1000) {
   9492 			tipg_reg |= 0xc;
   9493 			emi_val = 1;
   9494 		} else {
    9495 			/* Roll back to the default values */
   9496 			tipg_reg |= 0x08;
   9497 			emi_val = 1;
   9498 		}
   9499 
   9500 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9501 
   9502 		rv = sc->phy.acquire(sc);
   9503 		if (rv)
   9504 			return;
   9505 
   9506 		if (sc->sc_type == WM_T_PCH2)
   9507 			emi_addr = I82579_RX_CONFIG;
   9508 		else
   9509 			emi_addr = I217_RX_CONFIG;
   9510 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9511 
   9512 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9513 			uint16_t phy_reg;
   9514 
   9515 			sc->phy.readreg_locked(dev, 2,
   9516 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9517 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9518 			if (speed == STATUS_SPEED_100
   9519 			    || speed == STATUS_SPEED_10)
   9520 				phy_reg |= 0x3e8;
   9521 			else
   9522 				phy_reg |= 0xfa;
   9523 			sc->phy.writereg_locked(dev, 2,
   9524 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9525 
   9526 			if (speed == STATUS_SPEED_1000) {
   9527 				sc->phy.readreg_locked(dev, 2,
   9528 				    HV_PM_CTRL, &phy_reg);
   9529 
   9530 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9531 
   9532 				sc->phy.writereg_locked(dev, 2,
   9533 				    HV_PM_CTRL, phy_reg);
   9534 			}
   9535 		}
   9536 		sc->phy.release(sc);
   9537 
   9538 		if (rv)
   9539 			return;
   9540 
   9541 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9542 			uint16_t data, ptr_gap;
   9543 
   9544 			if (speed == STATUS_SPEED_1000) {
   9545 				rv = sc->phy.acquire(sc);
   9546 				if (rv)
   9547 					return;
   9548 
   9549 				rv = sc->phy.readreg_locked(dev, 2,
   9550 				    I82579_UNKNOWN1, &data);
   9551 				if (rv) {
   9552 					sc->phy.release(sc);
   9553 					return;
   9554 				}
   9555 
   9556 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9557 				if (ptr_gap < 0x18) {
   9558 					data &= ~(0x3ff << 2);
   9559 					data |= (0x18 << 2);
   9560 					rv = sc->phy.writereg_locked(dev,
   9561 					    2, I82579_UNKNOWN1, data);
   9562 				}
   9563 				sc->phy.release(sc);
   9564 				if (rv)
   9565 					return;
   9566 			} else {
   9567 				rv = sc->phy.acquire(sc);
   9568 				if (rv)
   9569 					return;
   9570 
   9571 				rv = sc->phy.writereg_locked(dev, 2,
   9572 				    I82579_UNKNOWN1, 0xc023);
   9573 				sc->phy.release(sc);
   9574 				if (rv)
   9575 					return;
   9576 
   9577 			}
   9578 		}
   9579 	}
   9580 
   9581 	/*
   9582 	 * I217 Packet Loss issue:
   9583 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9584 	 * on power up.
   9585 	 * Set the Beacon Duration for I217 to 8 usec
   9586 	 */
   9587 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9588 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9589 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9590 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9591 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9592 	}
   9593 
   9594 	/* Work-around I218 hang issue */
   9595 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9596 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9597 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9598 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9599 		wm_k1_workaround_lpt_lp(sc, link);
   9600 
   9601 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9602 		/*
   9603 		 * Set platform power management values for Latency
   9604 		 * Tolerance Reporting (LTR)
   9605 		 */
   9606 		wm_platform_pm_pch_lpt(sc,
   9607 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9608 	}
   9609 
   9610 	/* Clear link partner's EEE ability */
   9611 	sc->eee_lp_ability = 0;
   9612 
   9613 	/* FEXTNVM6 K1-off workaround */
   9614 	if (sc->sc_type == WM_T_PCH_SPT) {
   9615 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9616 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9617 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9618 		else
   9619 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9620 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9621 	}
   9622 
   9623 	if (!link)
   9624 		return;
   9625 
   9626 	switch (sc->sc_type) {
   9627 	case WM_T_PCH2:
   9628 		wm_k1_workaround_lv(sc);
   9629 		/* FALLTHROUGH */
   9630 	case WM_T_PCH:
   9631 		if (sc->sc_phytype == WMPHY_82578)
   9632 			wm_link_stall_workaround_hv(sc);
   9633 		break;
   9634 	default:
   9635 		break;
   9636 	}
   9637 
   9638 	/* Enable/Disable EEE after link up */
   9639 	if (sc->sc_phytype > WMPHY_82579)
   9640 		wm_set_eee_pchlan(sc);
   9641 }
   9642 
   9643 /*
   9644  * wm_linkintr_tbi:
   9645  *
   9646  *	Helper; handle link interrupts for TBI mode.
   9647  */
   9648 static void
   9649 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9650 {
   9651 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9652 	uint32_t status;
   9653 
   9654 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9655 		__func__));
   9656 
   9657 	status = CSR_READ(sc, WMREG_STATUS);
   9658 	if (icr & ICR_LSC) {
   9659 		wm_check_for_link(sc);
   9660 		if (status & STATUS_LU) {
   9661 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9662 				device_xname(sc->sc_dev),
   9663 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9664 			/*
   9665 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9666 			 * so we should update sc->sc_ctrl
   9667 			 */
   9668 
   9669 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9670 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9671 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9672 			if (status & STATUS_FD)
   9673 				sc->sc_tctl |=
   9674 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9675 			else
   9676 				sc->sc_tctl |=
   9677 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9678 			if (sc->sc_ctrl & CTRL_TFCE)
   9679 				sc->sc_fcrtl |= FCRTL_XONE;
   9680 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9681 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9682 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9683 			sc->sc_tbi_linkup = 1;
   9684 			if_link_state_change(ifp, LINK_STATE_UP);
   9685 		} else {
   9686 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9687 				device_xname(sc->sc_dev)));
   9688 			sc->sc_tbi_linkup = 0;
   9689 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9690 		}
   9691 		/* Update LED */
   9692 		wm_tbi_serdes_set_linkled(sc);
   9693 	} else if (icr & ICR_RXSEQ)
   9694 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9695 			device_xname(sc->sc_dev)));
   9696 }
   9697 
   9698 /*
   9699  * wm_linkintr_serdes:
   9700  *
    9701  *	Helper; handle link interrupts for SERDES mode.
   9702  */
   9703 static void
   9704 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9705 {
   9706 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9707 	struct mii_data *mii = &sc->sc_mii;
   9708 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9709 	uint32_t pcs_adv, pcs_lpab, reg;
   9710 
   9711 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9712 		__func__));
   9713 
   9714 	if (icr & ICR_LSC) {
   9715 		/* Check PCS */
   9716 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9717 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9718 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9719 				device_xname(sc->sc_dev)));
   9720 			mii->mii_media_status |= IFM_ACTIVE;
   9721 			sc->sc_tbi_linkup = 1;
   9722 			if_link_state_change(ifp, LINK_STATE_UP);
   9723 		} else {
   9724 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9725 				device_xname(sc->sc_dev)));
   9726 			mii->mii_media_status |= IFM_NONE;
   9727 			sc->sc_tbi_linkup = 0;
   9728 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9729 			wm_tbi_serdes_set_linkled(sc);
   9730 			return;
   9731 		}
   9732 		mii->mii_media_active |= IFM_1000_SX;
   9733 		if ((reg & PCS_LSTS_FDX) != 0)
   9734 			mii->mii_media_active |= IFM_FDX;
   9735 		else
   9736 			mii->mii_media_active |= IFM_HDX;
   9737 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9738 			/* Check flow */
   9739 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9740 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9741 				DPRINTF(sc, WM_DEBUG_LINK,
   9742 				    ("XXX LINKOK but not ACOMP\n"));
   9743 				return;
   9744 			}
   9745 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9746 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9747 			DPRINTF(sc, WM_DEBUG_LINK,
   9748 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
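			/*
			 * Resolve the pause bits per the usual 802.3 Annex
			 * 28B priority table, which the chain below encodes
			 * (local / partner advertised PAUSE bits):
			 *
			 *	Sym Asym | Sym Asym | result
			 *	 1   x   |  1   x  | TX+RX pause
			 *	 0   1   |  1   1  | TX pause only
			 *	 1   1   |  0   1  | RX pause only
			 */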
   9749 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9750 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9751 				mii->mii_media_active |= IFM_FLOW
   9752 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9753 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9754 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9755 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9756 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9757 				mii->mii_media_active |= IFM_FLOW
   9758 				    | IFM_ETH_TXPAUSE;
   9759 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9760 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9761 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9762 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9763 				mii->mii_media_active |= IFM_FLOW
   9764 				    | IFM_ETH_RXPAUSE;
   9765 		}
   9766 		/* Update LED */
   9767 		wm_tbi_serdes_set_linkled(sc);
   9768 	} else
   9769 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9770 		    device_xname(sc->sc_dev)));
   9771 }
   9772 
   9773 /*
   9774  * wm_linkintr:
   9775  *
   9776  *	Helper; handle link interrupts.
   9777  */
   9778 static void
   9779 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9780 {
   9781 
   9782 	KASSERT(WM_CORE_LOCKED(sc));
   9783 
   9784 	if (sc->sc_flags & WM_F_HAS_MII)
   9785 		wm_linkintr_gmii(sc, icr);
   9786 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9787 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9788 		wm_linkintr_serdes(sc, icr);
   9789 	else
   9790 		wm_linkintr_tbi(sc, icr);
   9791 }
   9792 
   9793 
   9794 static inline void
   9795 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9796 {
   9797 
   9798 	if (wmq->wmq_txrx_use_workqueue)
   9799 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9800 	else
   9801 		softint_schedule(wmq->wmq_si);
   9802 }
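
/*
 * Deferred Tx/Rx processing therefore runs either in softint context (the
 * default) or on the per-device workqueue when wmq_txrx_use_workqueue is
 * set; the callers below copy that flag from sc->sc_txrx_use_workqueue just
 * before invoking this function.
 */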
   9803 
   9804 static inline void
   9805 wm_legacy_intr_disable(struct wm_softc *sc)
   9806 {
   9807 
   9808 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   9809 }
   9810 
   9811 static inline void
   9812 wm_legacy_intr_enable(struct wm_softc *sc)
   9813 {
   9814 
   9815 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   9816 }
   9817 
   9818 /*
   9819  * wm_intr_legacy:
   9820  *
   9821  *	Interrupt service routine for INTx and MSI.
   9822  */
   9823 static int
   9824 wm_intr_legacy(void *arg)
   9825 {
   9826 	struct wm_softc *sc = arg;
   9827 	struct wm_queue *wmq = &sc->sc_queue[0];
   9828 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9829 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9830 	uint32_t icr, rndval = 0;
   9831 	int handled = 0;
   9832 	bool more = false;
   9833 
   9834 	while (1 /* CONSTCOND */) {
   9835 		icr = CSR_READ(sc, WMREG_ICR);
   9836 		if ((icr & sc->sc_icr) == 0)
   9837 			break;
   9838 		if (handled == 0)
   9839 			DPRINTF(sc, WM_DEBUG_TX,
    9840 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9841 		if (rndval == 0)
   9842 			rndval = icr;
   9843 
   9844 		mutex_enter(rxq->rxq_lock);
   9845 
   9846 		if (rxq->rxq_stopping) {
   9847 			mutex_exit(rxq->rxq_lock);
   9848 			break;
   9849 		}
   9850 
   9851 		handled = 1;
   9852 
   9853 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9854 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9855 			DPRINTF(sc, WM_DEBUG_RX,
   9856 			    ("%s: RX: got Rx intr 0x%08x\n",
   9857 				device_xname(sc->sc_dev),
   9858 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9859 			WM_Q_EVCNT_INCR(rxq, intr);
   9860 		}
   9861 #endif
   9862 		/*
   9863 		 * wm_rxeof() does *not* call upper layer functions directly,
    9864 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9865 		 * So, we can call wm_rxeof() in interrupt context.
   9866 		 */
   9867 		more = wm_rxeof(rxq, UINT_MAX);
   9868 
   9869 		mutex_exit(rxq->rxq_lock);
   9870 		mutex_enter(txq->txq_lock);
   9871 
   9872 		if (txq->txq_stopping) {
   9873 			mutex_exit(txq->txq_lock);
   9874 			break;
   9875 		}
   9876 
   9877 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9878 		if (icr & ICR_TXDW) {
   9879 			DPRINTF(sc, WM_DEBUG_TX,
   9880 			    ("%s: TX: got TXDW interrupt\n",
   9881 				device_xname(sc->sc_dev)));
   9882 			WM_Q_EVCNT_INCR(txq, txdw);
   9883 		}
   9884 #endif
   9885 		more |= wm_txeof(txq, UINT_MAX);
   9886 
   9887 		mutex_exit(txq->txq_lock);
   9888 		WM_CORE_LOCK(sc);
   9889 
   9890 		if (sc->sc_core_stopping) {
   9891 			WM_CORE_UNLOCK(sc);
   9892 			break;
   9893 		}
   9894 
   9895 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9896 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9897 			wm_linkintr(sc, icr);
   9898 		}
   9899 		if ((icr & ICR_GPI(0)) != 0)
   9900 			device_printf(sc->sc_dev, "got module interrupt\n");
   9901 
   9902 		WM_CORE_UNLOCK(sc);
   9903 
   9904 		if (icr & ICR_RXO) {
   9905 #if defined(WM_DEBUG)
   9906 			log(LOG_WARNING, "%s: Receive overrun\n",
   9907 			    device_xname(sc->sc_dev));
   9908 #endif /* defined(WM_DEBUG) */
   9909 		}
   9910 	}
   9911 
   9912 	rnd_add_uint32(&sc->rnd_source, rndval);
   9913 
   9914 	if (more) {
   9915 		/* Try to get more packets going. */
   9916 		wm_legacy_intr_disable(sc);
   9917 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9918 		wm_sched_handle_queue(sc, wmq);
   9919 	}
   9920 
   9921 	return handled;
   9922 }
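
/*
 * Note that ICR is read-to-clear, so each pass of the loop above
 * acknowledges the causes it observed; the handler keeps looping until no
 * enabled interrupt cause remains asserted.
 */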
   9923 
   9924 static inline void
   9925 wm_txrxintr_disable(struct wm_queue *wmq)
   9926 {
   9927 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9928 
   9929 	if (__predict_false(!wm_is_using_msix(sc))) {
   9930 		return wm_legacy_intr_disable(sc);
   9931 	}
   9932 
   9933 	if (sc->sc_type == WM_T_82574)
   9934 		CSR_WRITE(sc, WMREG_IMC,
   9935 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9936 	else if (sc->sc_type == WM_T_82575)
   9937 		CSR_WRITE(sc, WMREG_EIMC,
   9938 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9939 	else
   9940 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9941 }
   9942 
   9943 static inline void
   9944 wm_txrxintr_enable(struct wm_queue *wmq)
   9945 {
   9946 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9947 
   9948 	wm_itrs_calculate(sc, wmq);
   9949 
   9950 	if (__predict_false(!wm_is_using_msix(sc))) {
   9951 		return wm_legacy_intr_enable(sc);
   9952 	}
   9953 
   9954 	/*
    9955 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9956 	 * here.  It does not matter which of RXQ(0) and RXQ(1) enables
    9957 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
    9958 	 * its wm_handle_queue(wmq) is running.
   9959 	 */
   9960 	if (sc->sc_type == WM_T_82574)
   9961 		CSR_WRITE(sc, WMREG_IMS,
   9962 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9963 	else if (sc->sc_type == WM_T_82575)
   9964 		CSR_WRITE(sc, WMREG_EIMS,
   9965 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9966 	else
   9967 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9968 }
   9969 
   9970 static int
   9971 wm_txrxintr_msix(void *arg)
   9972 {
   9973 	struct wm_queue *wmq = arg;
   9974 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9975 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9976 	struct wm_softc *sc = txq->txq_sc;
   9977 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9978 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9979 	bool txmore;
   9980 	bool rxmore;
   9981 
   9982 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9983 
   9984 	DPRINTF(sc, WM_DEBUG_TX,
   9985 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9986 
   9987 	wm_txrxintr_disable(wmq);
   9988 
   9989 	mutex_enter(txq->txq_lock);
   9990 
   9991 	if (txq->txq_stopping) {
   9992 		mutex_exit(txq->txq_lock);
   9993 		return 0;
   9994 	}
   9995 
   9996 	WM_Q_EVCNT_INCR(txq, txdw);
   9997 	txmore = wm_txeof(txq, txlimit);
   9998 	/* wm_deferred start() is done in wm_handle_queue(). */
   9999 	mutex_exit(txq->txq_lock);
   10000 
   10001 	DPRINTF(sc, WM_DEBUG_RX,
   10002 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10003 	mutex_enter(rxq->rxq_lock);
   10004 
   10005 	if (rxq->rxq_stopping) {
   10006 		mutex_exit(rxq->rxq_lock);
   10007 		return 0;
   10008 	}
   10009 
   10010 	WM_Q_EVCNT_INCR(rxq, intr);
   10011 	rxmore = wm_rxeof(rxq, rxlimit);
   10012 	mutex_exit(rxq->rxq_lock);
   10013 
   10014 	wm_itrs_writereg(sc, wmq);
   10015 
   10016 	if (txmore || rxmore) {
   10017 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10018 		wm_sched_handle_queue(sc, wmq);
   10019 	} else
   10020 		wm_txrxintr_enable(wmq);
   10021 
   10022 	return 1;
   10023 }
   10024 
   10025 static void
   10026 wm_handle_queue(void *arg)
   10027 {
   10028 	struct wm_queue *wmq = arg;
   10029 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10030 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10031 	struct wm_softc *sc = txq->txq_sc;
   10032 	u_int txlimit = sc->sc_tx_process_limit;
   10033 	u_int rxlimit = sc->sc_rx_process_limit;
   10034 	bool txmore;
   10035 	bool rxmore;
   10036 
   10037 	mutex_enter(txq->txq_lock);
   10038 	if (txq->txq_stopping) {
   10039 		mutex_exit(txq->txq_lock);
   10040 		return;
   10041 	}
   10042 	txmore = wm_txeof(txq, txlimit);
   10043 	wm_deferred_start_locked(txq);
   10044 	mutex_exit(txq->txq_lock);
   10045 
   10046 	mutex_enter(rxq->rxq_lock);
   10047 	if (rxq->rxq_stopping) {
   10048 		mutex_exit(rxq->rxq_lock);
   10049 		return;
   10050 	}
   10051 	WM_Q_EVCNT_INCR(rxq, defer);
   10052 	rxmore = wm_rxeof(rxq, rxlimit);
   10053 	mutex_exit(rxq->rxq_lock);
   10054 
   10055 	if (txmore || rxmore) {
   10056 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10057 		wm_sched_handle_queue(sc, wmq);
   10058 	} else
   10059 		wm_txrxintr_enable(wmq);
   10060 }
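
/*
 * Note the two-tier limits: wm_txrxintr_msix() above polls with
 * sc_{tx,rx}_intr_process_limit in hard interrupt context, while this
 * deferred handler uses the separately tunable sc_{tx,rx}_process_limit;
 * in both cases leftover work is rescheduled rather than re-enabling the
 * interrupt, which bounds the time spent per invocation.
 */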
   10061 
   10062 static void
   10063 wm_handle_queue_work(struct work *wk, void *context)
   10064 {
   10065 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10066 
   10067 	/*
   10068 	 * "enqueued flag" is not required here.
   10069 	 */
   10070 	wm_handle_queue(wmq);
   10071 }
   10072 
   10073 /*
   10074  * wm_linkintr_msix:
   10075  *
   10076  *	Interrupt service routine for link status change for MSI-X.
   10077  */
   10078 static int
   10079 wm_linkintr_msix(void *arg)
   10080 {
   10081 	struct wm_softc *sc = arg;
   10082 	uint32_t reg;
    10083 	bool has_rxo = false;
   10084 
   10085 	reg = CSR_READ(sc, WMREG_ICR);
   10086 	WM_CORE_LOCK(sc);
   10087 	DPRINTF(sc, WM_DEBUG_LINK,
   10088 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10089 		device_xname(sc->sc_dev), reg));
   10090 
   10091 	if (sc->sc_core_stopping)
   10092 		goto out;
   10093 
   10094 	if ((reg & ICR_LSC) != 0) {
   10095 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10096 		wm_linkintr(sc, ICR_LSC);
   10097 	}
   10098 	if ((reg & ICR_GPI(0)) != 0)
   10099 		device_printf(sc->sc_dev, "got module interrupt\n");
   10100 
   10101 	/*
   10102 	 * XXX 82574 MSI-X mode workaround
   10103 	 *
    10104 	 * In 82574 MSI-X mode, a receive overrun (RXO) is signalled on the
    10105 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
    10106 	 * ICR_RXQ(1) vector.  So we generate ICR_RXQ(0) and ICR_RXQ(1)
    10107 	 * interrupts by writing WMREG_ICS to process the received packets.
   10108 	 */
   10109 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10110 #if defined(WM_DEBUG)
   10111 		log(LOG_WARNING, "%s: Receive overrun\n",
   10112 		    device_xname(sc->sc_dev));
   10113 #endif /* defined(WM_DEBUG) */
   10114 
   10115 		has_rxo = true;
   10116 		/*
    10117 		 * The RXO interrupt fires at a very high rate when receive
    10118 		 * traffic is heavy, so we handle ICR_OTHER in polling mode,
    10119 		 * just like the Tx/Rx interrupts.  ICR_OTHER is re-enabled at
    10120 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    10121 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   10122 		 */
   10123 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10124 
   10125 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10126 	}
    10127
   10130 out:
   10131 	WM_CORE_UNLOCK(sc);
   10132 
   10133 	if (sc->sc_type == WM_T_82574) {
   10134 		if (!has_rxo)
   10135 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10136 		else
   10137 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10138 	} else if (sc->sc_type == WM_T_82575)
   10139 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10140 	else
   10141 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10142 
   10143 	return 1;
   10144 }
   10145 
   10146 /*
   10147  * Media related.
   10148  * GMII, SGMII, TBI (and SERDES)
   10149  */
   10150 
   10151 /* Common */
   10152 
   10153 /*
   10154  * wm_tbi_serdes_set_linkled:
   10155  *
   10156  *	Update the link LED on TBI and SERDES devices.
   10157  */
   10158 static void
   10159 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10160 {
   10161 
   10162 	if (sc->sc_tbi_linkup)
   10163 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10164 	else
   10165 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10166 
   10167 	/* 82540 or newer devices are active low */
   10168 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10169 
   10170 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10171 }
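
/*
 * A worked example of the XOR above: on an 82540 or newer part with the
 * link up, CTRL_SWDPIN(0) is first set and then toggled back to zero,
 * driving the pin low, which lights the active-low LED.
 */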
   10172 
   10173 /* GMII related */
   10174 
   10175 /*
   10176  * wm_gmii_reset:
   10177  *
   10178  *	Reset the PHY.
   10179  */
   10180 static void
   10181 wm_gmii_reset(struct wm_softc *sc)
   10182 {
   10183 	uint32_t reg;
   10184 	int rv;
   10185 
   10186 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10187 		device_xname(sc->sc_dev), __func__));
   10188 
   10189 	rv = sc->phy.acquire(sc);
   10190 	if (rv != 0) {
   10191 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10192 		    __func__);
   10193 		return;
   10194 	}
   10195 
   10196 	switch (sc->sc_type) {
   10197 	case WM_T_82542_2_0:
   10198 	case WM_T_82542_2_1:
   10199 		/* null */
   10200 		break;
   10201 	case WM_T_82543:
   10202 		/*
    10203 		 * With the 82543, we need to force the MAC's speed and
    10204 		 * duplex to match the PHY's speed and duplex configuration.
   10205 		 * In addition, we need to perform a hardware reset on the PHY
   10206 		 * to take it out of reset.
   10207 		 */
   10208 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10209 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10210 
   10211 		/* The PHY reset pin is active-low. */
   10212 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10213 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10214 		    CTRL_EXT_SWDPIN(4));
   10215 		reg |= CTRL_EXT_SWDPIO(4);
   10216 
   10217 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10218 		CSR_WRITE_FLUSH(sc);
   10219 		delay(10*1000);
   10220 
   10221 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10222 		CSR_WRITE_FLUSH(sc);
   10223 		delay(150);
   10224 #if 0
   10225 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10226 #endif
   10227 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10228 		break;
   10229 	case WM_T_82544:	/* Reset 10000us */
   10230 	case WM_T_82540:
   10231 	case WM_T_82545:
   10232 	case WM_T_82545_3:
   10233 	case WM_T_82546:
   10234 	case WM_T_82546_3:
   10235 	case WM_T_82541:
   10236 	case WM_T_82541_2:
   10237 	case WM_T_82547:
   10238 	case WM_T_82547_2:
   10239 	case WM_T_82571:	/* Reset 100us */
   10240 	case WM_T_82572:
   10241 	case WM_T_82573:
   10242 	case WM_T_82574:
   10243 	case WM_T_82575:
   10244 	case WM_T_82576:
   10245 	case WM_T_82580:
   10246 	case WM_T_I350:
   10247 	case WM_T_I354:
   10248 	case WM_T_I210:
   10249 	case WM_T_I211:
   10250 	case WM_T_82583:
   10251 	case WM_T_80003:
   10252 		/* Generic reset */
   10253 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10254 		CSR_WRITE_FLUSH(sc);
   10255 		delay(20000);
   10256 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10257 		CSR_WRITE_FLUSH(sc);
   10258 		delay(20000);
   10259 
   10260 		if ((sc->sc_type == WM_T_82541)
   10261 		    || (sc->sc_type == WM_T_82541_2)
   10262 		    || (sc->sc_type == WM_T_82547)
   10263 		    || (sc->sc_type == WM_T_82547_2)) {
    10264 			/* Workarounds for IGP are done in igp_reset() */
   10265 			/* XXX add code to set LED after phy reset */
   10266 		}
   10267 		break;
   10268 	case WM_T_ICH8:
   10269 	case WM_T_ICH9:
   10270 	case WM_T_ICH10:
   10271 	case WM_T_PCH:
   10272 	case WM_T_PCH2:
   10273 	case WM_T_PCH_LPT:
   10274 	case WM_T_PCH_SPT:
   10275 	case WM_T_PCH_CNP:
   10276 		/* Generic reset */
   10277 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10278 		CSR_WRITE_FLUSH(sc);
   10279 		delay(100);
   10280 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10281 		CSR_WRITE_FLUSH(sc);
   10282 		delay(150);
   10283 		break;
   10284 	default:
   10285 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10286 		    __func__);
   10287 		break;
   10288 	}
   10289 
   10290 	sc->phy.release(sc);
   10291 
   10292 	/* get_cfg_done */
   10293 	wm_get_cfg_done(sc);
   10294 
   10295 	/* Extra setup */
   10296 	switch (sc->sc_type) {
   10297 	case WM_T_82542_2_0:
   10298 	case WM_T_82542_2_1:
   10299 	case WM_T_82543:
   10300 	case WM_T_82544:
   10301 	case WM_T_82540:
   10302 	case WM_T_82545:
   10303 	case WM_T_82545_3:
   10304 	case WM_T_82546:
   10305 	case WM_T_82546_3:
   10306 	case WM_T_82541_2:
   10307 	case WM_T_82547_2:
   10308 	case WM_T_82571:
   10309 	case WM_T_82572:
   10310 	case WM_T_82573:
   10311 	case WM_T_82574:
   10312 	case WM_T_82583:
   10313 	case WM_T_82575:
   10314 	case WM_T_82576:
   10315 	case WM_T_82580:
   10316 	case WM_T_I350:
   10317 	case WM_T_I354:
   10318 	case WM_T_I210:
   10319 	case WM_T_I211:
   10320 	case WM_T_80003:
   10321 		/* Null */
   10322 		break;
   10323 	case WM_T_82541:
   10324 	case WM_T_82547:
    10325 		/* XXX Configure the LED after PHY reset */
   10326 		break;
   10327 	case WM_T_ICH8:
   10328 	case WM_T_ICH9:
   10329 	case WM_T_ICH10:
   10330 	case WM_T_PCH:
   10331 	case WM_T_PCH2:
   10332 	case WM_T_PCH_LPT:
   10333 	case WM_T_PCH_SPT:
   10334 	case WM_T_PCH_CNP:
   10335 		wm_phy_post_reset(sc);
   10336 		break;
   10337 	default:
   10338 		panic("%s: unknown type\n", __func__);
   10339 		break;
   10340 	}
   10341 }
   10342 
   10343 /*
   10344  * Setup sc_phytype and mii_{read|write}reg.
   10345  *
    10346  *  To identify the PHY type, the correct read/write functions must be
    10347  * selected, and to select them the PCI ID or the MAC type is required,
    10348  * since the PHY registers cannot be accessed yet.
    10349  *
    10350  *  On the first call of this function, the PHY ID is not yet known, so
    10351  * the PCI ID or the MAC type is checked.  The list of PCI IDs may not
    10352  * be complete, so the result might be incorrect.
    10353  *
    10354  *  On the second call, the PHY OUI and model are used to identify the
    10355  * PHY type.  This may still be imperfect because the comparison table
    10356  * lacks some entries, but it is more reliable than the first call.
    10357  *
    10358  *  If the newly detected result differs from the previous assumption,
    10359  * a diagnostic message is printed.
   10360  */
   10361 static void
   10362 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10363     uint16_t phy_model)
   10364 {
   10365 	device_t dev = sc->sc_dev;
   10366 	struct mii_data *mii = &sc->sc_mii;
   10367 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10368 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10369 	mii_readreg_t new_readreg;
   10370 	mii_writereg_t new_writereg;
   10371 	bool dodiag = true;
   10372 
   10373 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10374 		device_xname(sc->sc_dev), __func__));
   10375 
   10376 	/*
    10377 	 * A 1000BASE-T SFP uses SGMII and the first assumed PHY type is
    10378 	 * always incorrect, so don't print diagnostic output on the 2nd call.
   10379 	 */
   10380 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10381 		dodiag = false;
   10382 
   10383 	if (mii->mii_readreg == NULL) {
   10384 		/*
   10385 		 *  This is the first call of this function. For ICH and PCH
   10386 		 * variants, it's difficult to determine the PHY access method
   10387 		 * by sc_type, so use the PCI product ID for some devices.
   10388 		 */
   10389 
   10390 		switch (sc->sc_pcidevid) {
   10391 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10392 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10393 			/* 82577 */
   10394 			new_phytype = WMPHY_82577;
   10395 			break;
   10396 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10397 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10398 			/* 82578 */
   10399 			new_phytype = WMPHY_82578;
   10400 			break;
   10401 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10402 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10403 			/* 82579 */
   10404 			new_phytype = WMPHY_82579;
   10405 			break;
   10406 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10407 		case PCI_PRODUCT_INTEL_82801I_BM:
   10408 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10409 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10410 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10411 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10412 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10413 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10414 			/* ICH8, 9, 10 with 82567 */
   10415 			new_phytype = WMPHY_BM;
   10416 			break;
   10417 		default:
   10418 			break;
   10419 		}
   10420 	} else {
   10421 		/* It's not the first call. Use PHY OUI and model */
   10422 		switch (phy_oui) {
   10423 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10424 			switch (phy_model) {
   10425 			case 0x0004: /* XXX */
   10426 				new_phytype = WMPHY_82578;
   10427 				break;
   10428 			default:
   10429 				break;
   10430 			}
   10431 			break;
   10432 		case MII_OUI_xxMARVELL:
   10433 			switch (phy_model) {
   10434 			case MII_MODEL_xxMARVELL_I210:
   10435 				new_phytype = WMPHY_I210;
   10436 				break;
   10437 			case MII_MODEL_xxMARVELL_E1011:
   10438 			case MII_MODEL_xxMARVELL_E1000_3:
   10439 			case MII_MODEL_xxMARVELL_E1000_5:
   10440 			case MII_MODEL_xxMARVELL_E1112:
   10441 				new_phytype = WMPHY_M88;
   10442 				break;
   10443 			case MII_MODEL_xxMARVELL_E1149:
   10444 				new_phytype = WMPHY_BM;
   10445 				break;
   10446 			case MII_MODEL_xxMARVELL_E1111:
   10447 			case MII_MODEL_xxMARVELL_I347:
   10448 			case MII_MODEL_xxMARVELL_E1512:
   10449 			case MII_MODEL_xxMARVELL_E1340M:
   10450 			case MII_MODEL_xxMARVELL_E1543:
   10451 				new_phytype = WMPHY_M88;
   10452 				break;
   10453 			case MII_MODEL_xxMARVELL_I82563:
   10454 				new_phytype = WMPHY_GG82563;
   10455 				break;
   10456 			default:
   10457 				break;
   10458 			}
   10459 			break;
   10460 		case MII_OUI_INTEL:
   10461 			switch (phy_model) {
   10462 			case MII_MODEL_INTEL_I82577:
   10463 				new_phytype = WMPHY_82577;
   10464 				break;
   10465 			case MII_MODEL_INTEL_I82579:
   10466 				new_phytype = WMPHY_82579;
   10467 				break;
   10468 			case MII_MODEL_INTEL_I217:
   10469 				new_phytype = WMPHY_I217;
   10470 				break;
   10471 			case MII_MODEL_INTEL_I82580:
   10472 				new_phytype = WMPHY_82580;
   10473 				break;
   10474 			case MII_MODEL_INTEL_I350:
   10475 				new_phytype = WMPHY_I350;
    10476 				break;
   10478 			default:
   10479 				break;
   10480 			}
   10481 			break;
   10482 		case MII_OUI_yyINTEL:
   10483 			switch (phy_model) {
   10484 			case MII_MODEL_yyINTEL_I82562G:
   10485 			case MII_MODEL_yyINTEL_I82562EM:
   10486 			case MII_MODEL_yyINTEL_I82562ET:
   10487 				new_phytype = WMPHY_IFE;
   10488 				break;
   10489 			case MII_MODEL_yyINTEL_IGP01E1000:
   10490 				new_phytype = WMPHY_IGP;
   10491 				break;
   10492 			case MII_MODEL_yyINTEL_I82566:
   10493 				new_phytype = WMPHY_IGP_3;
   10494 				break;
   10495 			default:
   10496 				break;
   10497 			}
   10498 			break;
   10499 		default:
   10500 			break;
   10501 		}
   10502 
   10503 		if (dodiag) {
   10504 			if (new_phytype == WMPHY_UNKNOWN)
   10505 				aprint_verbose_dev(dev,
   10506 				    "%s: Unknown PHY model. OUI=%06x, "
   10507 				    "model=%04x\n", __func__, phy_oui,
   10508 				    phy_model);
   10509 
   10510 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10511 			    && (sc->sc_phytype != new_phytype)) {
   10512 				aprint_error_dev(dev, "Previously assumed PHY "
    10513 				    "type(%u) was incorrect. PHY type from PHY "
   10514 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10515 			}
   10516 		}
   10517 	}
   10518 
   10519 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10520 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10521 		/* SGMII */
   10522 		new_readreg = wm_sgmii_readreg;
   10523 		new_writereg = wm_sgmii_writereg;
   10524 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10525 		/* BM2 (phyaddr == 1) */
   10526 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10527 		    && (new_phytype != WMPHY_BM)
   10528 		    && (new_phytype != WMPHY_UNKNOWN))
   10529 			doubt_phytype = new_phytype;
   10530 		new_phytype = WMPHY_BM;
   10531 		new_readreg = wm_gmii_bm_readreg;
   10532 		new_writereg = wm_gmii_bm_writereg;
   10533 	} else if (sc->sc_type >= WM_T_PCH) {
   10534 		/* All PCH* use _hv_ */
   10535 		new_readreg = wm_gmii_hv_readreg;
   10536 		new_writereg = wm_gmii_hv_writereg;
   10537 	} else if (sc->sc_type >= WM_T_ICH8) {
   10538 		/* non-82567 ICH8, 9 and 10 */
   10539 		new_readreg = wm_gmii_i82544_readreg;
   10540 		new_writereg = wm_gmii_i82544_writereg;
   10541 	} else if (sc->sc_type >= WM_T_80003) {
   10542 		/* 80003 */
   10543 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10544 		    && (new_phytype != WMPHY_GG82563)
   10545 		    && (new_phytype != WMPHY_UNKNOWN))
   10546 			doubt_phytype = new_phytype;
   10547 		new_phytype = WMPHY_GG82563;
   10548 		new_readreg = wm_gmii_i80003_readreg;
   10549 		new_writereg = wm_gmii_i80003_writereg;
   10550 	} else if (sc->sc_type >= WM_T_I210) {
   10551 		/* I210 and I211 */
   10552 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10553 		    && (new_phytype != WMPHY_I210)
   10554 		    && (new_phytype != WMPHY_UNKNOWN))
   10555 			doubt_phytype = new_phytype;
   10556 		new_phytype = WMPHY_I210;
   10557 		new_readreg = wm_gmii_gs40g_readreg;
   10558 		new_writereg = wm_gmii_gs40g_writereg;
   10559 	} else if (sc->sc_type >= WM_T_82580) {
   10560 		/* 82580, I350 and I354 */
   10561 		new_readreg = wm_gmii_82580_readreg;
   10562 		new_writereg = wm_gmii_82580_writereg;
   10563 	} else if (sc->sc_type >= WM_T_82544) {
    10564 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10565 		new_readreg = wm_gmii_i82544_readreg;
   10566 		new_writereg = wm_gmii_i82544_writereg;
   10567 	} else {
   10568 		new_readreg = wm_gmii_i82543_readreg;
   10569 		new_writereg = wm_gmii_i82543_writereg;
   10570 	}
   10571 
   10572 	if (new_phytype == WMPHY_BM) {
   10573 		/* All BM use _bm_ */
   10574 		new_readreg = wm_gmii_bm_readreg;
   10575 		new_writereg = wm_gmii_bm_writereg;
   10576 	}
   10577 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10578 		/* All PCH* use _hv_ */
   10579 		new_readreg = wm_gmii_hv_readreg;
   10580 		new_writereg = wm_gmii_hv_writereg;
   10581 	}
   10582 
   10583 	/* Diag output */
   10584 	if (dodiag) {
   10585 		if (doubt_phytype != WMPHY_UNKNOWN)
   10586 			aprint_error_dev(dev, "Assumed new PHY type was "
   10587 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10588 			    new_phytype);
   10589 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10590 		    && (sc->sc_phytype != new_phytype))
    10591 			aprint_error_dev(dev, "Previously assumed PHY type "
    10592 			    "(%u) was incorrect. New PHY type = %u\n",
   10593 			    sc->sc_phytype, new_phytype);
   10594 
   10595 		if ((mii->mii_readreg != NULL) &&
   10596 		    (new_phytype == WMPHY_UNKNOWN))
   10597 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10598 
   10599 		if ((mii->mii_readreg != NULL) &&
   10600 		    (mii->mii_readreg != new_readreg))
   10601 			aprint_error_dev(dev, "Previously assumed PHY "
   10602 			    "read/write function was incorrect.\n");
   10603 	}
   10604 
   10605 	/* Update now */
   10606 	sc->sc_phytype = new_phytype;
   10607 	mii->mii_readreg = new_readreg;
   10608 	mii->mii_writereg = new_writereg;
   10609 	if (new_readreg == wm_gmii_hv_readreg) {
   10610 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10611 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10612 	} else if (new_readreg == wm_sgmii_readreg) {
   10613 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10614 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10615 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10616 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10617 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10618 	}
   10619 }
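
/*
 * Illustrative call sequence for the function above (a sketch only; the
 * exact arguments of the first call are an assumption, while the second
 * call mirrors what wm_gmii_mediainit() below actually does):
 */
#if 0
	/* 1st call: guess from the PCI ID or MAC type, no PHY ID known. */
	wm_gmii_setup_phytype(sc, 0, 0);
	/* ... mii_attach() probes the PHY ... */
	/* 2nd call: refine using the probed PHY's OUI and model. */
	wm_gmii_setup_phytype(sc, child->mii_mpd_oui, child->mii_mpd_model);
#endif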
   10620 
   10621 /*
   10622  * wm_get_phy_id_82575:
   10623  *
   10624  * Return PHY ID. Return -1 if it failed.
   10625  */
   10626 static int
   10627 wm_get_phy_id_82575(struct wm_softc *sc)
   10628 {
   10629 	uint32_t reg;
   10630 	int phyid = -1;
   10631 
   10632 	/* XXX */
   10633 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10634 		return -1;
   10635 
   10636 	if (wm_sgmii_uses_mdio(sc)) {
   10637 		switch (sc->sc_type) {
   10638 		case WM_T_82575:
   10639 		case WM_T_82576:
   10640 			reg = CSR_READ(sc, WMREG_MDIC);
   10641 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10642 			break;
   10643 		case WM_T_82580:
   10644 		case WM_T_I350:
   10645 		case WM_T_I354:
   10646 		case WM_T_I210:
   10647 		case WM_T_I211:
   10648 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10649 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10650 			break;
   10651 		default:
   10652 			return -1;
   10653 		}
   10654 	}
   10655 
   10656 	return phyid;
   10657 }
   10658 
   10659 /*
   10660  * wm_gmii_mediainit:
   10661  *
   10662  *	Initialize media for use on 1000BASE-T devices.
   10663  */
   10664 static void
   10665 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10666 {
   10667 	device_t dev = sc->sc_dev;
   10668 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10669 	struct mii_data *mii = &sc->sc_mii;
   10670 
   10671 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10672 		device_xname(sc->sc_dev), __func__));
   10673 
   10674 	/* We have GMII. */
   10675 	sc->sc_flags |= WM_F_HAS_MII;
   10676 
   10677 	if (sc->sc_type == WM_T_80003)
   10678 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10679 	else
   10680 		sc->sc_tipg = TIPG_1000T_DFLT;
   10681 
   10682 	/*
   10683 	 * Let the chip set speed/duplex on its own based on
   10684 	 * signals from the PHY.
   10685 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10686 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10687 	 */
   10688 	sc->sc_ctrl |= CTRL_SLU;
   10689 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10690 
   10691 	/* Initialize our media structures and probe the GMII. */
   10692 	mii->mii_ifp = ifp;
   10693 
   10694 	mii->mii_statchg = wm_gmii_statchg;
   10695 
   10696 	/* get PHY control from SMBus to PCIe */
   10697 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10698 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10699 	    || (sc->sc_type == WM_T_PCH_CNP))
   10700 		wm_init_phy_workarounds_pchlan(sc);
   10701 
   10702 	wm_gmii_reset(sc);
   10703 
   10704 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10705 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10706 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10707 
   10708 	/* Setup internal SGMII PHY for SFP */
   10709 	wm_sgmii_sfp_preconfig(sc);
   10710 
   10711 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10712 	    || (sc->sc_type == WM_T_82580)
   10713 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10714 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10715 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10716 			/* Attach only one port */
   10717 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10718 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10719 		} else {
   10720 			int i, id;
   10721 			uint32_t ctrl_ext;
   10722 
   10723 			id = wm_get_phy_id_82575(sc);
   10724 			if (id != -1) {
   10725 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10726 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10727 			}
   10728 			if ((id == -1)
   10729 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10730 				/* Power on sgmii phy if it is disabled */
   10731 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10732 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    10733 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   10734 				CSR_WRITE_FLUSH(sc);
   10735 				delay(300*1000); /* XXX too long */
   10736 
   10737 				/*
    10738 				 * Scan PHY addresses 1 through 7.
    10739 				 *
    10740 				 * I2C access can fail with the I2C
    10741 				 * register's ERROR bit set, so suppress
    10742 				 * error messages while scanning.
   10743 				 */
   10744 				sc->phy.no_errprint = true;
   10745 				for (i = 1; i < 8; i++)
   10746 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10747 					    0xffffffff, i, MII_OFFSET_ANY,
   10748 					    MIIF_DOPAUSE);
   10749 				sc->phy.no_errprint = false;
   10750 
   10751 				/* Restore previous sfp cage power state */
   10752 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10753 			}
   10754 		}
   10755 	} else
   10756 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10757 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10758 
   10759 	/*
   10760 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10761 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10762 	 */
   10763 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10764 		|| (sc->sc_type == WM_T_PCH_SPT)
   10765 		|| (sc->sc_type == WM_T_PCH_CNP))
   10766 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10767 		wm_set_mdio_slow_mode_hv(sc);
   10768 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10769 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10770 	}
   10771 
   10772 	/*
   10773 	 * (For ICH8 variants)
   10774 	 * If PHY detection failed, use BM's r/w function and retry.
   10775 	 */
   10776 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10777 		/* if failed, retry with *_bm_* */
   10778 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10779 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10780 		    sc->sc_phytype);
   10781 		sc->sc_phytype = WMPHY_BM;
   10782 		mii->mii_readreg = wm_gmii_bm_readreg;
   10783 		mii->mii_writereg = wm_gmii_bm_writereg;
   10784 
   10785 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10786 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10787 	}
   10788 
   10789 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10790 		/* No PHY was found */
   10791 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10792 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10793 		sc->sc_phytype = WMPHY_NONE;
   10794 	} else {
   10795 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10796 
   10797 		/*
    10798 		 * A PHY was found.  Check the PHY type again via the second
    10799 		 * call of wm_gmii_setup_phytype().
   10800 		 */
   10801 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10802 		    child->mii_mpd_model);
   10803 
   10804 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10805 	}
   10806 }
   10807 
   10808 /*
   10809  * wm_gmii_mediachange:	[ifmedia interface function]
   10810  *
   10811  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10812  */
   10813 static int
   10814 wm_gmii_mediachange(struct ifnet *ifp)
   10815 {
   10816 	struct wm_softc *sc = ifp->if_softc;
   10817 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10818 	uint32_t reg;
   10819 	int rc;
   10820 
   10821 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10822 		device_xname(sc->sc_dev), __func__));
   10823 	if ((ifp->if_flags & IFF_UP) == 0)
   10824 		return 0;
   10825 
   10826 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10827 	if ((sc->sc_type == WM_T_82580)
   10828 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10829 	    || (sc->sc_type == WM_T_I211)) {
   10830 		reg = CSR_READ(sc, WMREG_PHPM);
   10831 		reg &= ~PHPM_GO_LINK_D;
   10832 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10833 	}
   10834 
   10835 	/* Disable D0 LPLU. */
   10836 	wm_lplu_d0_disable(sc);
   10837 
   10838 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10839 	sc->sc_ctrl |= CTRL_SLU;
   10840 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10841 	    || (sc->sc_type > WM_T_82543)) {
   10842 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10843 	} else {
   10844 		sc->sc_ctrl &= ~CTRL_ASDE;
   10845 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10846 		if (ife->ifm_media & IFM_FDX)
   10847 			sc->sc_ctrl |= CTRL_FD;
   10848 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10849 		case IFM_10_T:
   10850 			sc->sc_ctrl |= CTRL_SPEED_10;
   10851 			break;
   10852 		case IFM_100_TX:
   10853 			sc->sc_ctrl |= CTRL_SPEED_100;
   10854 			break;
   10855 		case IFM_1000_T:
   10856 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10857 			break;
   10858 		case IFM_NONE:
   10859 			/* There is no specific setting for IFM_NONE */
   10860 			break;
   10861 		default:
   10862 			panic("wm_gmii_mediachange: bad media 0x%x",
   10863 			    ife->ifm_media);
   10864 		}
   10865 	}
   10866 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10867 	CSR_WRITE_FLUSH(sc);
   10868 
   10869 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10870 		wm_serdes_mediachange(ifp);
   10871 
   10872 	if (sc->sc_type <= WM_T_82543)
   10873 		wm_gmii_reset(sc);
   10874 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10875 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10876 		/* Allow time for the SFP cage to power up the PHY */
   10877 		delay(300 * 1000);
   10878 		wm_gmii_reset(sc);
   10879 	}
   10880 
   10881 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10882 		return 0;
   10883 	return rc;
   10884 }
   10885 
   10886 /*
   10887  * wm_gmii_mediastatus:	[ifmedia interface function]
   10888  *
   10889  *	Get the current interface media status on a 1000BASE-T device.
   10890  */
   10891 static void
   10892 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10893 {
   10894 	struct wm_softc *sc = ifp->if_softc;
   10895 
   10896 	ether_mediastatus(ifp, ifmr);
   10897 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10898 	    | sc->sc_flowflags;
   10899 }
   10900 
   10901 #define	MDI_IO		CTRL_SWDPIN(2)
   10902 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10903 #define	MDI_CLK		CTRL_SWDPIN(3)
   10904 
   10905 static void
   10906 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10907 {
   10908 	uint32_t i, v;
   10909 
   10910 	v = CSR_READ(sc, WMREG_CTRL);
   10911 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10912 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10913 
   10914 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10915 		if (data & i)
   10916 			v |= MDI_IO;
   10917 		else
   10918 			v &= ~MDI_IO;
   10919 		CSR_WRITE(sc, WMREG_CTRL, v);
   10920 		CSR_WRITE_FLUSH(sc);
   10921 		delay(10);
   10922 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10923 		CSR_WRITE_FLUSH(sc);
   10924 		delay(10);
   10925 		CSR_WRITE(sc, WMREG_CTRL, v);
   10926 		CSR_WRITE_FLUSH(sc);
   10927 		delay(10);
   10928 	}
   10929 }
   10930 
   10931 static uint16_t
   10932 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10933 {
   10934 	uint32_t v, i;
   10935 	uint16_t data = 0;
   10936 
   10937 	v = CSR_READ(sc, WMREG_CTRL);
   10938 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10939 	v |= CTRL_SWDPIO(3);
   10940 
   10941 	CSR_WRITE(sc, WMREG_CTRL, v);
   10942 	CSR_WRITE_FLUSH(sc);
   10943 	delay(10);
   10944 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10945 	CSR_WRITE_FLUSH(sc);
   10946 	delay(10);
   10947 	CSR_WRITE(sc, WMREG_CTRL, v);
   10948 	CSR_WRITE_FLUSH(sc);
   10949 	delay(10);
   10950 
   10951 	for (i = 0; i < 16; i++) {
   10952 		data <<= 1;
   10953 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10954 		CSR_WRITE_FLUSH(sc);
   10955 		delay(10);
   10956 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10957 			data |= 1;
   10958 		CSR_WRITE(sc, WMREG_CTRL, v);
   10959 		CSR_WRITE_FLUSH(sc);
   10960 		delay(10);
   10961 	}
   10962 
   10963 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10964 	CSR_WRITE_FLUSH(sc);
   10965 	delay(10);
   10966 	CSR_WRITE(sc, WMREG_CTRL, v);
   10967 	CSR_WRITE_FLUSH(sc);
   10968 	delay(10);
   10969 
   10970 	return data;
   10971 }
   10972 
   10973 #undef MDI_IO
   10974 #undef MDI_DIR
   10975 #undef MDI_CLK
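
/*
 * For reference, the helpers above bit-bang IEEE 802.3 clause 22 MDIO
 * frames on the software-definable pins.  The read frame built by
 * wm_gmii_i82543_readreg() below is, after the 32-bit all-ones preamble:
 *
 *	ST (01) | OP (10 = read) | PHYAD (5 bits) | REGAD (5 bits)
 *
 * i.e. the 14 bits handed to wm_i82543_mii_sendbits(); the turnaround and
 * the 16 data bits are then clocked in by wm_i82543_mii_recvbits().
 */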
   10976 
   10977 /*
   10978  * wm_gmii_i82543_readreg:	[mii interface function]
   10979  *
   10980  *	Read a PHY register on the GMII (i82543 version).
   10981  */
   10982 static int
   10983 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10984 {
   10985 	struct wm_softc *sc = device_private(dev);
   10986 
   10987 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10988 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10989 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10990 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10991 
   10992 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10993 		device_xname(dev), phy, reg, *val));
   10994 
   10995 	return 0;
   10996 }
   10997 
   10998 /*
   10999  * wm_gmii_i82543_writereg:	[mii interface function]
   11000  *
   11001  *	Write a PHY register on the GMII (i82543 version).
   11002  */
   11003 static int
   11004 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11005 {
   11006 	struct wm_softc *sc = device_private(dev);
   11007 
   11008 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11009 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11010 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11011 	    (MII_COMMAND_START << 30), 32);
   11012 
   11013 	return 0;
   11014 }
   11015 
   11016 /*
   11017  * wm_gmii_mdic_readreg:	[mii interface function]
   11018  *
   11019  *	Read a PHY register on the GMII.
   11020  */
   11021 static int
   11022 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11023 {
   11024 	struct wm_softc *sc = device_private(dev);
   11025 	uint32_t mdic = 0;
   11026 	int i;
   11027 
   11028 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11029 	    && (reg > MII_ADDRMASK)) {
   11030 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11031 		    __func__, sc->sc_phytype, reg);
   11032 		reg &= MII_ADDRMASK;
   11033 	}
   11034 
   11035 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11036 	    MDIC_REGADD(reg));
   11037 
   11038 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11039 		delay(50);
   11040 		mdic = CSR_READ(sc, WMREG_MDIC);
   11041 		if (mdic & MDIC_READY)
   11042 			break;
   11043 	}
   11044 
   11045 	if ((mdic & MDIC_READY) == 0) {
   11046 		DPRINTF(sc, WM_DEBUG_GMII,
   11047 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11048 			device_xname(dev), phy, reg));
   11049 		return ETIMEDOUT;
   11050 	} else if (mdic & MDIC_E) {
   11051 		/* This is normal if no PHY is present. */
   11052 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   11053 			device_xname(sc->sc_dev), phy, reg));
   11054 		return -1;
   11055 	} else
   11056 		*val = MDIC_DATA(mdic);
   11057 
   11058 	/*
   11059 	 * Allow some time after each MDIC transaction to avoid
   11060 	 * reading duplicate data in the next MDIC transaction.
   11061 	 */
   11062 	if (sc->sc_type == WM_T_PCH2)
   11063 		delay(100);
   11064 
   11065 	return 0;
   11066 }
   11067 
   11068 /*
   11069  * wm_gmii_mdic_writereg:	[mii interface function]
   11070  *
   11071  *	Write a PHY register on the GMII.
   11072  */
   11073 static int
   11074 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11075 {
   11076 	struct wm_softc *sc = device_private(dev);
   11077 	uint32_t mdic = 0;
   11078 	int i;
   11079 
   11080 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11081 	    && (reg > MII_ADDRMASK)) {
   11082 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11083 		    __func__, sc->sc_phytype, reg);
   11084 		reg &= MII_ADDRMASK;
   11085 	}
   11086 
   11087 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11088 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11089 
   11090 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11091 		delay(50);
   11092 		mdic = CSR_READ(sc, WMREG_MDIC);
   11093 		if (mdic & MDIC_READY)
   11094 			break;
   11095 	}
   11096 
   11097 	if ((mdic & MDIC_READY) == 0) {
   11098 		DPRINTF(sc, WM_DEBUG_GMII,
   11099 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11100 			device_xname(dev), phy, reg));
   11101 		return ETIMEDOUT;
   11102 	} else if (mdic & MDIC_E) {
   11103 		DPRINTF(sc, WM_DEBUG_GMII,
   11104 		    ("%s: MDIC write error: phy %d reg %d\n",
   11105 			device_xname(dev), phy, reg));
   11106 		return -1;
   11107 	}
   11108 
   11109 	/*
   11110 	 * Allow some time after each MDIC transaction to avoid
   11111 	 * reading duplicate data in the next MDIC transaction.
   11112 	 */
   11113 	if (sc->sc_type == WM_T_PCH2)
   11114 		delay(100);
   11115 
   11116 	return 0;
   11117 }
   11118 
   11119 /*
   11120  * wm_gmii_i82544_readreg:	[mii interface function]
   11121  *
   11122  *	Read a PHY register on the GMII.
   11123  */
   11124 static int
   11125 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11126 {
   11127 	struct wm_softc *sc = device_private(dev);
   11128 	int rv;
   11129 
   11130 	if (sc->phy.acquire(sc)) {
   11131 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11132 		return -1;
   11133 	}
   11134 
   11135 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11136 
   11137 	sc->phy.release(sc);
   11138 
   11139 	return rv;
   11140 }
   11141 
   11142 static int
   11143 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11144 {
   11145 	struct wm_softc *sc = device_private(dev);
   11146 	int rv;
   11147 
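	/*
	 * Registers above the multi-page boundary encode their page in
	 * the upper bits.  IGP PHYs take the page via IGPHY_PAGE_SELECT
	 * before the in-page offset (reg & MII_ADDRMASK) is accessed.
	 */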
   11148 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11149 		switch (sc->sc_phytype) {
   11150 		case WMPHY_IGP:
   11151 		case WMPHY_IGP_2:
   11152 		case WMPHY_IGP_3:
   11153 			rv = wm_gmii_mdic_writereg(dev, phy,
   11154 			    IGPHY_PAGE_SELECT, reg);
   11155 			if (rv != 0)
   11156 				return rv;
   11157 			break;
   11158 		default:
   11159 #ifdef WM_DEBUG
   11160 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11161 			    __func__, sc->sc_phytype, reg);
   11162 #endif
   11163 			break;
   11164 		}
   11165 	}
   11166 
   11167 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11168 }
   11169 
   11170 /*
   11171  * wm_gmii_i82544_writereg:	[mii interface function]
   11172  *
   11173  *	Write a PHY register on the GMII.
   11174  */
   11175 static int
   11176 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11177 {
   11178 	struct wm_softc *sc = device_private(dev);
   11179 	int rv;
   11180 
   11181 	if (sc->phy.acquire(sc)) {
   11182 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11183 		return -1;
   11184 	}
   11185 
   11186 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11187 	sc->phy.release(sc);
   11188 
   11189 	return rv;
   11190 }
   11191 
   11192 static int
   11193 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11194 {
   11195 	struct wm_softc *sc = device_private(dev);
   11196 	int rv;
   11197 
   11198 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11199 		switch (sc->sc_phytype) {
   11200 		case WMPHY_IGP:
   11201 		case WMPHY_IGP_2:
   11202 		case WMPHY_IGP_3:
   11203 			rv = wm_gmii_mdic_writereg(dev, phy,
   11204 			    IGPHY_PAGE_SELECT, reg);
   11205 			if (rv != 0)
   11206 				return rv;
   11207 			break;
   11208 		default:
   11209 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
			    __func__, sc->sc_phytype, reg);
   11212 #endif
   11213 			break;
   11214 		}
   11215 	}
   11216 
   11217 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11218 }
   11219 
   11220 /*
   11221  * wm_gmii_i80003_readreg:	[mii interface function]
   11222  *
   11223  *	Read a PHY register on the kumeran
   11224  * This could be handled by the PHY layer if we didn't have to lock the
   11225  * resource ...
   11226  */
   11227 static int
   11228 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11229 {
   11230 	struct wm_softc *sc = device_private(dev);
   11231 	int page_select;
   11232 	uint16_t temp, temp2;
   11233 	int rv = 0;
   11234 
   11235 	if (phy != 1) /* Only one PHY on kumeran bus */
   11236 		return -1;
   11237 
   11238 	if (sc->phy.acquire(sc)) {
   11239 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11240 		return -1;
   11241 	}
   11242 
   11243 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11244 		page_select = GG82563_PHY_PAGE_SELECT;
   11245 	else {
   11246 		/*
   11247 		 * Use Alternative Page Select register to access registers
   11248 		 * 30 and 31.
   11249 		 */
   11250 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11251 	}
   11252 	temp = reg >> GG82563_PAGE_SHIFT;
   11253 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11254 		goto out;
   11255 
   11256 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
		 */
   11261 		delay(200);
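		/* Read the page-select register back to verify the write. */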
   11262 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11263 		if ((rv != 0) || (temp2 != temp)) {
   11264 			device_printf(dev, "%s failed\n", __func__);
   11265 			rv = -1;
   11266 			goto out;
   11267 		}
   11268 		delay(200);
   11269 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11270 		delay(200);
   11271 	} else
   11272 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11273 
   11274 out:
   11275 	sc->phy.release(sc);
   11276 	return rv;
   11277 }
   11278 
   11279 /*
   11280  * wm_gmii_i80003_writereg:	[mii interface function]
   11281  *
   11282  *	Write a PHY register on the kumeran.
   11283  * This could be handled by the PHY layer if we didn't have to lock the
   11284  * resource ...
   11285  */
   11286 static int
   11287 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11288 {
   11289 	struct wm_softc *sc = device_private(dev);
   11290 	int page_select, rv;
   11291 	uint16_t temp, temp2;
   11292 
   11293 	if (phy != 1) /* Only one PHY on kumeran bus */
   11294 		return -1;
   11295 
   11296 	if (sc->phy.acquire(sc)) {
   11297 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11298 		return -1;
   11299 	}
   11300 
   11301 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11302 		page_select = GG82563_PHY_PAGE_SELECT;
   11303 	else {
   11304 		/*
   11305 		 * Use Alternative Page Select register to access registers
   11306 		 * 30 and 31.
   11307 		 */
   11308 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11309 	}
   11310 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11311 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11312 		goto out;
   11313 
   11314 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
		 */
   11319 		delay(200);
   11320 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11321 		if ((rv != 0) || (temp2 != temp)) {
   11322 			device_printf(dev, "%s failed\n", __func__);
   11323 			rv = -1;
   11324 			goto out;
   11325 		}
   11326 		delay(200);
   11327 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11328 		delay(200);
   11329 	} else
   11330 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11331 
   11332 out:
   11333 	sc->phy.release(sc);
   11334 	return rv;
   11335 }
   11336 
   11337 /*
   11338  * wm_gmii_bm_readreg:	[mii interface function]
   11339  *
 *	Read a PHY register on the BM PHY (82574/82583, ICH9 and ICH10).
   11341  * This could be handled by the PHY layer if we didn't have to lock the
   11342  * resource ...
   11343  */
   11344 static int
   11345 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11346 {
   11347 	struct wm_softc *sc = device_private(dev);
   11348 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11349 	int rv;
   11350 
   11351 	if (sc->phy.acquire(sc)) {
   11352 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11353 		return -1;
   11354 	}
   11355 
   11356 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11357 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11358 		    || (reg == 31)) ? 1 : phy;
   11359 	/* Page 800 works differently than the rest so it has its own func */
   11360 	if (page == BM_WUC_PAGE) {
   11361 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11362 		goto release;
   11363 	}
   11364 
   11365 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11366 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11367 		    && (sc->sc_type != WM_T_82583))
   11368 			rv = wm_gmii_mdic_writereg(dev, phy,
   11369 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11370 		else
   11371 			rv = wm_gmii_mdic_writereg(dev, phy,
   11372 			    BME1000_PHY_PAGE_SELECT, page);
   11373 		if (rv != 0)
   11374 			goto release;
   11375 	}
   11376 
   11377 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11378 
   11379 release:
   11380 	sc->phy.release(sc);
   11381 	return rv;
   11382 }
   11383 
   11384 /*
   11385  * wm_gmii_bm_writereg:	[mii interface function]
   11386  *
 *	Write a PHY register on the BM PHY (82574/82583, ICH9 and ICH10).
   11388  * This could be handled by the PHY layer if we didn't have to lock the
   11389  * resource ...
   11390  */
   11391 static int
   11392 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11393 {
   11394 	struct wm_softc *sc = device_private(dev);
   11395 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11396 	int rv;
   11397 
   11398 	if (sc->phy.acquire(sc)) {
   11399 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11400 		return -1;
   11401 	}
   11402 
   11403 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11404 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11405 		    || (reg == 31)) ? 1 : phy;
   11406 	/* Page 800 works differently than the rest so it has its own func */
   11407 	if (page == BM_WUC_PAGE) {
   11408 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11409 		goto release;
   11410 	}
   11411 
   11412 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11413 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11414 		    && (sc->sc_type != WM_T_82583))
   11415 			rv = wm_gmii_mdic_writereg(dev, phy,
   11416 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11417 		else
   11418 			rv = wm_gmii_mdic_writereg(dev, phy,
   11419 			    BME1000_PHY_PAGE_SELECT, page);
   11420 		if (rv != 0)
   11421 			goto release;
   11422 	}
   11423 
   11424 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11425 
   11426 release:
   11427 	sc->phy.release(sc);
   11428 	return rv;
   11429 }
   11430 
   11431 /*
   11432  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11433  *  @dev: pointer to the HW structure
 *  @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
   11435  *
   11436  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11437  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11438  */
   11439 static int
   11440 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11441 {
   11442 #ifdef WM_DEBUG
   11443 	struct wm_softc *sc = device_private(dev);
   11444 #endif
   11445 	uint16_t temp;
   11446 	int rv;
   11447 
   11448 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11449 		device_xname(dev), __func__));
   11450 
   11451 	if (!phy_regp)
   11452 		return -1;
   11453 
   11454 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11455 
   11456 	/* Select Port Control Registers page */
   11457 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11458 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11459 	if (rv != 0)
   11460 		return rv;
   11461 
   11462 	/* Read WUCE and save it */
   11463 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11464 	if (rv != 0)
   11465 		return rv;
   11466 
   11467 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11468 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11469 	 */
   11470 	temp = *phy_regp;
   11471 	temp |= BM_WUC_ENABLE_BIT;
   11472 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11473 
   11474 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11475 		return rv;
   11476 
   11477 	/* Select Host Wakeup Registers page - caller now able to write
   11478 	 * registers on the Wakeup registers page
   11479 	 */
   11480 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11481 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11482 }
   11483 
   11484 /*
   11485  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11486  *  @dev: pointer to the HW structure
 *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
   11488  *
   11489  *  Restore BM_WUC_ENABLE_REG to its original value.
   11490  *
   11491  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11492  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11493  *  caller.
   11494  */
   11495 static int
   11496 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11497 {
   11498 #ifdef WM_DEBUG
   11499 	struct wm_softc *sc = device_private(dev);
   11500 #endif
   11501 
   11502 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11503 		device_xname(dev), __func__));
   11504 
   11505 	if (!phy_regp)
   11506 		return -1;
   11507 
   11508 	/* Select Port Control Registers page */
   11509 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11510 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11511 
   11512 	/* Restore 769.17 to its original value */
   11513 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11514 
   11515 	return 0;
   11516 }
   11517 
/*
 *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 *  @dev: pointer to the HW structure
 *  @offset: register offset to be read or written
 *  @val: pointer to the data to read or write
 *  @rd: determines if operation is read or write
 *  @page_set: BM_WUC_PAGE already set and access enabled
 *
 *  Read the PHY register at offset and store the retrieved information in
 *  data, or write data to PHY register at offset.  Note that the procedure
 *  to access the PHY wakeup registers differs from that for the other PHY
 *  registers.  It works as follows:
 *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 if accessed from the manageability
 *     engine)
 *  3) Write the address using the address opcode (0x11)
 *  4) Read or write the data using the data opcode (0x12)
 *  5) Restore 769.17.2 to its original value
 *
 *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
 *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
 *
 *  Assumes the semaphore is already acquired.  When page_set==TRUE, assumes
 *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calls to
 *  wm_[enable|disable]_phy_wakeup_reg_access_bm()).
 */
   11543 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11545 	bool page_set)
   11546 {
   11547 	struct wm_softc *sc = device_private(dev);
   11548 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11549 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11550 	uint16_t wuce;
   11551 	int rv = 0;
   11552 
   11553 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11554 		device_xname(dev), __func__));
   11555 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11556 	if ((sc->sc_type == WM_T_PCH)
   11557 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11558 		device_printf(dev,
   11559 		    "Attempting to access page %d while gig enabled.\n", page);
   11560 	}
   11561 
   11562 	if (!page_set) {
   11563 		/* Enable access to PHY wakeup registers */
   11564 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11565 		if (rv != 0) {
   11566 			device_printf(dev,
   11567 			    "%s: Could not enable PHY wakeup reg access\n",
   11568 			    __func__);
   11569 			return rv;
   11570 		}
   11571 	}
   11572 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11573 		device_xname(sc->sc_dev), __func__, page, regnum));
   11574 
	/*
	 * Steps 3 and 4: Access the PHY wakeup registers using the
	 * address and data opcodes (see the procedure described above).
	 */
   11579 
   11580 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11581 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11582 	if (rv != 0)
   11583 		return rv;
   11584 
   11585 	if (rd) {
   11586 		/* Read the Wakeup register page value using opcode 0x12 */
   11587 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11588 	} else {
   11589 		/* Write the Wakeup register page value using opcode 0x12 */
   11590 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11591 	}
   11592 	if (rv != 0)
   11593 		return rv;
   11594 
   11595 	if (!page_set)
   11596 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11597 
   11598 	return rv;
   11599 }
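
/*
 * Usage sketch (not compiled): a read of one wakeup-page register with
 * page_set=false lets wm_access_phy_wakeup_reg_bm() do the WUCE
 * enable/disable bracketing itself.  The register packing shown is an
 * assumption for illustration (a page field above BME1000_PAGE_SHIFT,
 * as in the wm_gmii_bm_* callers); the PHY semaphore must be held.
 */
#if 0
	uint16_t data;
	int rv;

	/* Read register 1 on BM_WUC_PAGE (page 800). */
	rv = wm_access_phy_wakeup_reg_bm(dev,
	    (BM_WUC_PAGE << BME1000_PAGE_SHIFT) | 1, &data, true, false);
#endif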
   11600 
   11601 /*
   11602  * wm_gmii_hv_readreg:	[mii interface function]
   11603  *
 *	Read a PHY register on the HV (PCH and newer) PHY.
   11605  * This could be handled by the PHY layer if we didn't have to lock the
   11606  * resource ...
   11607  */
   11608 static int
   11609 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11610 {
   11611 	struct wm_softc *sc = device_private(dev);
   11612 	int rv;
   11613 
   11614 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11615 		device_xname(dev), __func__));
   11616 	if (sc->phy.acquire(sc)) {
   11617 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11618 		return -1;
   11619 	}
   11620 
   11621 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11622 	sc->phy.release(sc);
   11623 	return rv;
   11624 }
   11625 
   11626 static int
   11627 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11628 {
   11629 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11630 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11631 	int rv;
   11632 
   11633 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11634 
   11635 	/* Page 800 works differently than the rest so it has its own func */
   11636 	if (page == BM_WUC_PAGE)
   11637 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11638 
   11639 	/*
   11640 	 * Lower than page 768 works differently than the rest so it has its
   11641 	 * own func
   11642 	 */
   11643 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
   11645 		return -1;
   11646 	}
   11647 
   11648 	/*
   11649 	 * XXX I21[789] documents say that the SMBus Address register is at
   11650 	 * PHY address 01, Page 0 (not 768), Register 26.
   11651 	 */
   11652 	if (page == HV_INTC_FC_PAGE_START)
   11653 		page = 0;
   11654 
   11655 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11656 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11657 		    page << BME1000_PAGE_SHIFT);
   11658 		if (rv != 0)
   11659 			return rv;
   11660 	}
   11661 
   11662 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11663 }
   11664 
   11665 /*
   11666  * wm_gmii_hv_writereg:	[mii interface function]
   11667  *
 *	Write a PHY register on the HV (PCH and newer) PHY.
   11669  * This could be handled by the PHY layer if we didn't have to lock the
   11670  * resource ...
   11671  */
   11672 static int
   11673 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11674 {
   11675 	struct wm_softc *sc = device_private(dev);
   11676 	int rv;
   11677 
   11678 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11679 		device_xname(dev), __func__));
   11680 
   11681 	if (sc->phy.acquire(sc)) {
   11682 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11683 		return -1;
   11684 	}
   11685 
   11686 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11687 	sc->phy.release(sc);
   11688 
   11689 	return rv;
   11690 }
   11691 
   11692 static int
   11693 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11694 {
   11695 	struct wm_softc *sc = device_private(dev);
   11696 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11697 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11698 	int rv;
   11699 
   11700 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11701 
   11702 	/* Page 800 works differently than the rest so it has its own func */
   11703 	if (page == BM_WUC_PAGE)
   11704 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11705 		    false);
   11706 
   11707 	/*
   11708 	 * Lower than page 768 works differently than the rest so it has its
   11709 	 * own func
   11710 	 */
   11711 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
   11713 		return -1;
   11714 	}
   11715 
	/*
	 * XXX I21[789] documents say that the SMBus Address register
	 * is at PHY address 01, Page 0 (not 768), Register 26.
	 */
	if (page == HV_INTC_FC_PAGE_START)
		page = 0;

	/*
	 * XXX Workaround MDIO accesses being disabled after entering
	 * IEEE Power Down (whenever bit 11 of the PHY control
	 * register is set)
	 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev >= 1)
		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
		    && ((val & (1 << 11)) != 0))
			device_printf(dev, "XXX need workaround\n");
	}

	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
		    page << BME1000_PAGE_SHIFT);
		if (rv != 0)
			return rv;
	}
   11747 
   11748 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11749 }
   11750 
   11751 /*
   11752  * wm_gmii_82580_readreg:	[mii interface function]
   11753  *
   11754  *	Read a PHY register on the 82580 and I350.
   11755  * This could be handled by the PHY layer if we didn't have to lock the
   11756  * resource ...
   11757  */
   11758 static int
   11759 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11760 {
   11761 	struct wm_softc *sc = device_private(dev);
   11762 	int rv;
   11763 
   11764 	if (sc->phy.acquire(sc) != 0) {
   11765 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11766 		return -1;
   11767 	}
   11768 
   11769 #ifdef DIAGNOSTIC
   11770 	if (reg > MII_ADDRMASK) {
   11771 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11772 		    __func__, sc->sc_phytype, reg);
   11773 		reg &= MII_ADDRMASK;
   11774 	}
   11775 #endif
   11776 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11777 
   11778 	sc->phy.release(sc);
   11779 	return rv;
   11780 }
   11781 
   11782 /*
   11783  * wm_gmii_82580_writereg:	[mii interface function]
   11784  *
   11785  *	Write a PHY register on the 82580 and I350.
   11786  * This could be handled by the PHY layer if we didn't have to lock the
   11787  * resource ...
   11788  */
   11789 static int
   11790 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11791 {
   11792 	struct wm_softc *sc = device_private(dev);
   11793 	int rv;
   11794 
   11795 	if (sc->phy.acquire(sc) != 0) {
   11796 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11797 		return -1;
   11798 	}
   11799 
   11800 #ifdef DIAGNOSTIC
   11801 	if (reg > MII_ADDRMASK) {
   11802 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11803 		    __func__, sc->sc_phytype, reg);
   11804 		reg &= MII_ADDRMASK;
   11805 	}
   11806 #endif
   11807 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11808 
   11809 	sc->phy.release(sc);
   11810 	return rv;
   11811 }
   11812 
   11813 /*
   11814  * wm_gmii_gs40g_readreg:	[mii interface function]
   11815  *
 *	Read a PHY register on the I210 and I211.
   11817  * This could be handled by the PHY layer if we didn't have to lock the
   11818  * resource ...
   11819  */
   11820 static int
   11821 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11822 {
   11823 	struct wm_softc *sc = device_private(dev);
   11824 	int page, offset;
   11825 	int rv;
   11826 
   11827 	/* Acquire semaphore */
   11828 	if (sc->phy.acquire(sc)) {
   11829 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11830 		return -1;
   11831 	}
   11832 
   11833 	/* Page select */
   11834 	page = reg >> GS40G_PAGE_SHIFT;
   11835 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11836 	if (rv != 0)
   11837 		goto release;
   11838 
   11839 	/* Read reg */
   11840 	offset = reg & GS40G_OFFSET_MASK;
   11841 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11842 
   11843 release:
   11844 	sc->phy.release(sc);
   11845 	return rv;
   11846 }
   11847 
   11848 /*
   11849  * wm_gmii_gs40g_writereg:	[mii interface function]
   11850  *
   11851  *	Write a PHY register on the I210 and I211.
   11852  * This could be handled by the PHY layer if we didn't have to lock the
   11853  * resource ...
   11854  */
   11855 static int
   11856 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11857 {
   11858 	struct wm_softc *sc = device_private(dev);
   11859 	uint16_t page;
   11860 	int offset, rv;
   11861 
   11862 	/* Acquire semaphore */
   11863 	if (sc->phy.acquire(sc)) {
   11864 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11865 		return -1;
   11866 	}
   11867 
   11868 	/* Page select */
   11869 	page = reg >> GS40G_PAGE_SHIFT;
   11870 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11871 	if (rv != 0)
   11872 		goto release;
   11873 
   11874 	/* Write reg */
   11875 	offset = reg & GS40G_OFFSET_MASK;
   11876 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11877 
   11878 release:
   11879 	/* Release semaphore */
   11880 	sc->phy.release(sc);
   11881 	return rv;
   11882 }
   11883 
   11884 /*
   11885  * wm_gmii_statchg:	[mii interface function]
   11886  *
   11887  *	Callback from MII layer when media changes.
   11888  */
   11889 static void
   11890 wm_gmii_statchg(struct ifnet *ifp)
   11891 {
   11892 	struct wm_softc *sc = ifp->if_softc;
   11893 	struct mii_data *mii = &sc->sc_mii;
   11894 
   11895 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11896 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11897 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11898 
   11899 	/* Get flow control negotiation result. */
   11900 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11901 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11902 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11903 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11904 	}
   11905 
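	/*
	 * Map the negotiated pause flags onto the MAC: TXPAUSE enables
	 * transmit flow control (CTRL_TFCE) and XON frames (FCRTL_XONE),
	 * RXPAUSE enables receive flow control (CTRL_RFCE).
	 */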
   11906 	if (sc->sc_flowflags & IFM_FLOW) {
   11907 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11908 			sc->sc_ctrl |= CTRL_TFCE;
   11909 			sc->sc_fcrtl |= FCRTL_XONE;
   11910 		}
   11911 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11912 			sc->sc_ctrl |= CTRL_RFCE;
   11913 	}
   11914 
   11915 	if (mii->mii_media_active & IFM_FDX) {
   11916 		DPRINTF(sc, WM_DEBUG_LINK,
   11917 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11918 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11919 	} else {
   11920 		DPRINTF(sc, WM_DEBUG_LINK,
   11921 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11922 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11923 	}
   11924 
   11925 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11926 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11927 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11928 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11929 	if (sc->sc_type == WM_T_80003) {
   11930 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11931 		case IFM_1000_T:
   11932 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11933 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11935 			break;
   11936 		default:
   11937 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11938 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11940 			break;
   11941 		}
   11942 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11943 	}
   11944 }
   11945 
   11946 /* kumeran related (80003, ICH* and PCH*) */
   11947 
   11948 /*
   11949  * wm_kmrn_readreg:
   11950  *
   11951  *	Read a kumeran register
   11952  */
   11953 static int
   11954 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11955 {
   11956 	int rv;
   11957 
   11958 	if (sc->sc_type == WM_T_80003)
   11959 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11960 	else
   11961 		rv = sc->phy.acquire(sc);
   11962 	if (rv != 0) {
   11963 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11964 		    __func__);
   11965 		return rv;
   11966 	}
   11967 
   11968 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11969 
   11970 	if (sc->sc_type == WM_T_80003)
   11971 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11972 	else
   11973 		sc->phy.release(sc);
   11974 
   11975 	return rv;
   11976 }
   11977 
   11978 static int
   11979 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11980 {
   11981 
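	/*
	 * A kumeran read is a simple handshake: write the offset with
	 * the read-enable bit (KUMCTRLSTA_REN) set, wait briefly, then
	 * read the data back from the same register.
	 */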
   11982 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11983 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11984 	    KUMCTRLSTA_REN);
   11985 	CSR_WRITE_FLUSH(sc);
   11986 	delay(2);
   11987 
   11988 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11989 
   11990 	return 0;
   11991 }
   11992 
   11993 /*
   11994  * wm_kmrn_writereg:
   11995  *
   11996  *	Write a kumeran register
   11997  */
   11998 static int
   11999 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12000 {
   12001 	int rv;
   12002 
   12003 	if (sc->sc_type == WM_T_80003)
   12004 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12005 	else
   12006 		rv = sc->phy.acquire(sc);
   12007 	if (rv != 0) {
   12008 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12009 		    __func__);
   12010 		return rv;
   12011 	}
   12012 
   12013 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12014 
   12015 	if (sc->sc_type == WM_T_80003)
   12016 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12017 	else
   12018 		sc->phy.release(sc);
   12019 
   12020 	return rv;
   12021 }
   12022 
   12023 static int
   12024 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12025 {
   12026 
   12027 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12028 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12029 
   12030 	return 0;
   12031 }
   12032 
   12033 /*
   12034  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12035  * This access method is different from IEEE MMD.
   12036  */
   12037 static int
   12038 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12039 {
   12040 	struct wm_softc *sc = device_private(dev);
   12041 	int rv;
   12042 
   12043 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12044 	if (rv != 0)
   12045 		return rv;
   12046 
   12047 	if (rd)
   12048 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12049 	else
   12050 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12051 	return rv;
   12052 }
   12053 
   12054 static int
   12055 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12056 {
   12057 
   12058 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12059 }
   12060 
   12061 static int
   12062 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12063 {
   12064 
   12065 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12066 }
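
/*
 * EMI usage sketch (not compiled): with the PHY semaphore held, an EMI
 * access is just the address write followed by a data transfer, as
 * implemented above.  The EMI address 0x10 is illustrative only.
 */
#if 0
	uint16_t data;

	if (wm_read_emi_reg_locked(dev, 0x10, &data) == 0)
		device_printf(dev, "EMI reg 0x10 = 0x%04hx\n", data);
#endif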
   12067 
   12068 /* SGMII related */
   12069 
   12070 /*
   12071  * wm_sgmii_uses_mdio
   12072  *
   12073  * Check whether the transaction is to the internal PHY or the external
   12074  * MDIO interface. Return true if it's MDIO.
   12075  */
   12076 static bool
   12077 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12078 {
   12079 	uint32_t reg;
   12080 	bool ismdio = false;
   12081 
   12082 	switch (sc->sc_type) {
   12083 	case WM_T_82575:
   12084 	case WM_T_82576:
   12085 		reg = CSR_READ(sc, WMREG_MDIC);
   12086 		ismdio = ((reg & MDIC_DEST) != 0);
   12087 		break;
   12088 	case WM_T_82580:
   12089 	case WM_T_I350:
   12090 	case WM_T_I354:
   12091 	case WM_T_I210:
   12092 	case WM_T_I211:
   12093 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12094 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12095 		break;
   12096 	default:
   12097 		break;
   12098 	}
   12099 
   12100 	return ismdio;
   12101 }
   12102 
   12103 /* Setup internal SGMII PHY for SFP */
   12104 static void
   12105 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12106 {
   12107 	uint16_t id1, id2, phyreg;
   12108 	int i, rv;
   12109 
   12110 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12111 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12112 		return;
   12113 
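	/*
	 * Probe each PHY address for a Marvell PHY and, when one is
	 * found, force its extended status register into SGMII (without
	 * clock) to copper mode (ESSR_SGMII_WOC_COPPER).
	 */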
   12114 	for (i = 0; i < MII_NPHY; i++) {
   12115 		sc->phy.no_errprint = true;
   12116 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12117 		if (rv != 0)
   12118 			continue;
   12119 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12120 		if (rv != 0)
   12121 			continue;
   12122 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12123 			continue;
   12124 		sc->phy.no_errprint = false;
   12125 
   12126 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12127 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12128 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12129 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12130 		break;
	}
}
   12134 
   12135 /*
   12136  * wm_sgmii_readreg:	[mii interface function]
   12137  *
   12138  *	Read a PHY register on the SGMII
   12139  * This could be handled by the PHY layer if we didn't have to lock the
   12140  * resource ...
   12141  */
   12142 static int
   12143 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12144 {
   12145 	struct wm_softc *sc = device_private(dev);
   12146 	int rv;
   12147 
   12148 	if (sc->phy.acquire(sc)) {
   12149 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12150 		return -1;
   12151 	}
   12152 
   12153 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12154 
   12155 	sc->phy.release(sc);
   12156 	return rv;
   12157 }
   12158 
   12159 static int
   12160 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12161 {
   12162 	struct wm_softc *sc = device_private(dev);
   12163 	uint32_t i2ccmd;
   12164 	int i, rv = 0;
   12165 
   12166 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12167 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12168 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12169 
   12170 	/* Poll the ready bit */
   12171 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12172 		delay(50);
   12173 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12174 		if (i2ccmd & I2CCMD_READY)
   12175 			break;
   12176 	}
   12177 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12178 		device_printf(dev, "I2CCMD Read did not complete\n");
   12179 		rv = ETIMEDOUT;
   12180 	}
   12181 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12182 		if (!sc->phy.no_errprint)
   12183 			device_printf(dev, "I2CCMD Error bit set\n");
   12184 		rv = EIO;
   12185 	}
   12186 
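	/* The I2C interface returns the two data bytes swapped; undo it. */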
   12187 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12188 
   12189 	return rv;
   12190 }
   12191 
   12192 /*
   12193  * wm_sgmii_writereg:	[mii interface function]
   12194  *
   12195  *	Write a PHY register on the SGMII.
   12196  * This could be handled by the PHY layer if we didn't have to lock the
   12197  * resource ...
   12198  */
   12199 static int
   12200 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12201 {
   12202 	struct wm_softc *sc = device_private(dev);
   12203 	int rv;
   12204 
   12205 	if (sc->phy.acquire(sc) != 0) {
   12206 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12207 		return -1;
   12208 	}
   12209 
   12210 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12211 
   12212 	sc->phy.release(sc);
   12213 
   12214 	return rv;
   12215 }
   12216 
   12217 static int
   12218 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12219 {
   12220 	struct wm_softc *sc = device_private(dev);
   12221 	uint32_t i2ccmd;
   12222 	uint16_t swapdata;
   12223 	int rv = 0;
   12224 	int i;
   12225 
   12226 	/* Swap the data bytes for the I2C interface */
   12227 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12228 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12229 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12230 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12231 
   12232 	/* Poll the ready bit */
   12233 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12234 		delay(50);
   12235 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12236 		if (i2ccmd & I2CCMD_READY)
   12237 			break;
   12238 	}
   12239 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12240 		device_printf(dev, "I2CCMD Write did not complete\n");
   12241 		rv = ETIMEDOUT;
   12242 	}
   12243 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12244 		device_printf(dev, "I2CCMD Error bit set\n");
   12245 		rv = EIO;
   12246 	}
   12247 
   12248 	return rv;
   12249 }
   12250 
   12251 /* TBI related */
   12252 
   12253 static bool
   12254 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12255 {
   12256 	bool sig;
   12257 
   12258 	sig = ctrl & CTRL_SWDPIN(1);
   12259 
   12260 	/*
   12261 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12262 	 * detect a signal, 1 if they don't.
   12263 	 */
   12264 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12265 		sig = !sig;
   12266 
   12267 	return sig;
   12268 }
   12269 
   12270 /*
   12271  * wm_tbi_mediainit:
   12272  *
   12273  *	Initialize media for use on 1000BASE-X devices.
   12274  */
   12275 static void
   12276 wm_tbi_mediainit(struct wm_softc *sc)
   12277 {
   12278 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12279 	const char *sep = "";
   12280 
   12281 	if (sc->sc_type < WM_T_82543)
   12282 		sc->sc_tipg = TIPG_WM_DFLT;
   12283 	else
   12284 		sc->sc_tipg = TIPG_LG_DFLT;
   12285 
   12286 	sc->sc_tbi_serdes_anegticks = 5;
   12287 
   12288 	/* Initialize our media structures */
   12289 	sc->sc_mii.mii_ifp = ifp;
   12290 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12291 
   12292 	ifp->if_baudrate = IF_Gbps(1);
   12293 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12294 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12295 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12296 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12297 		    sc->sc_core_lock);
   12298 	} else {
   12299 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12300 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12301 	}
   12302 
   12303 	/*
   12304 	 * SWD Pins:
   12305 	 *
   12306 	 *	0 = Link LED (output)
   12307 	 *	1 = Loss Of Signal (input)
   12308 	 */
   12309 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12310 
   12311 	/* XXX Perhaps this is only for TBI */
   12312 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12313 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12314 
   12315 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12316 		sc->sc_ctrl &= ~CTRL_LRST;
   12317 
   12318 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12319 
   12320 #define	ADD(ss, mm, dd)							\
   12321 do {									\
   12322 	aprint_normal("%s%s", sep, ss);					\
   12323 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12324 	sep = ", ";							\
   12325 } while (/*CONSTCOND*/0)
   12326 
   12327 	aprint_normal_dev(sc->sc_dev, "");
   12328 
   12329 	if (sc->sc_type == WM_T_I354) {
   12330 		uint32_t status;
   12331 
   12332 		status = CSR_READ(sc, WMREG_STATUS);
   12333 		if (((status & STATUS_2P5_SKU) != 0)
   12334 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,
			    ANAR_X_FD);
		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,
			    ANAR_X_FD);
   12338 	} else if (sc->sc_type == WM_T_82545) {
   12339 		/* Only 82545 is LX (XXX except SFP) */
   12340 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12341 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12342 	} else if (sc->sc_sfptype != 0) {
   12343 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12344 		switch (sc->sc_sfptype) {
   12345 		default:
   12346 		case SFF_SFP_ETH_FLAGS_1000SX:
   12347 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12348 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12349 			break;
   12350 		case SFF_SFP_ETH_FLAGS_1000LX:
   12351 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12352 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12353 			break;
   12354 		case SFF_SFP_ETH_FLAGS_1000CX:
   12355 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12356 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12357 			break;
   12358 		case SFF_SFP_ETH_FLAGS_1000T:
   12359 			ADD("1000baseT", IFM_1000_T, 0);
   12360 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12361 			break;
   12362 		case SFF_SFP_ETH_FLAGS_100FX:
   12363 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12364 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12365 			break;
   12366 		}
   12367 	} else {
   12368 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12369 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12370 	}
   12371 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12372 	aprint_normal("\n");
   12373 
   12374 #undef ADD
   12375 
   12376 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12377 }
   12378 
   12379 /*
   12380  * wm_tbi_mediachange:	[ifmedia interface function]
   12381  *
   12382  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12383  */
   12384 static int
   12385 wm_tbi_mediachange(struct ifnet *ifp)
   12386 {
   12387 	struct wm_softc *sc = ifp->if_softc;
   12388 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12389 	uint32_t status, ctrl;
   12390 	bool signal;
   12391 	int i;
   12392 
   12393 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12394 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12395 		/* XXX need some work for >= 82571 and < 82575 */
   12396 		if (sc->sc_type < WM_T_82575)
   12397 			return 0;
   12398 	}
   12399 
   12400 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12401 	    || (sc->sc_type >= WM_T_82575))
   12402 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12403 
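	/*
	 * Build the transmit configuration word (TXCW): enable
	 * autonegotiation and advertise the duplex abilities and pause
	 * bits selected by the current media setting.
	 */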
   12404 	sc->sc_ctrl &= ~CTRL_LRST;
   12405 	sc->sc_txcw = TXCW_ANE;
   12406 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12407 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12408 	else if (ife->ifm_media & IFM_FDX)
   12409 		sc->sc_txcw |= TXCW_FD;
   12410 	else
   12411 		sc->sc_txcw |= TXCW_HD;
   12412 
   12413 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12414 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12415 
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: sc_txcw = 0x%x after autoneg check\n",
		device_xname(sc->sc_dev), sc->sc_txcw));
   12418 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12419 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12420 	CSR_WRITE_FLUSH(sc);
   12421 	delay(1000);
   12422 
   12423 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12424 	signal = wm_tbi_havesignal(sc, ctrl);
   12425 
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n",
		device_xname(sc->sc_dev), signal));
   12428 
   12429 	if (signal) {
   12430 		/* Have signal; wait for the link to come up. */
   12431 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12432 			delay(10000);
   12433 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12434 				break;
   12435 		}
   12436 
		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));
   12439 
   12440 		status = CSR_READ(sc, WMREG_STATUS);
   12441 		DPRINTF(sc, WM_DEBUG_LINK,
   12442 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12443 			device_xname(sc->sc_dev), status, STATUS_LU));
   12444 		if (status & STATUS_LU) {
   12445 			/* Link is up. */
   12446 			DPRINTF(sc, WM_DEBUG_LINK,
   12447 			    ("%s: LINK: set media -> link up %s\n",
   12448 				device_xname(sc->sc_dev),
   12449 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12450 
   12451 			/*
   12452 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12453 			 * so we should update sc->sc_ctrl
   12454 			 */
   12455 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12456 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12457 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12458 			if (status & STATUS_FD)
   12459 				sc->sc_tctl |=
   12460 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12461 			else
   12462 				sc->sc_tctl |=
   12463 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12464 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12465 				sc->sc_fcrtl |= FCRTL_XONE;
   12466 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12467 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12468 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12469 			sc->sc_tbi_linkup = 1;
   12470 		} else {
   12471 			if (i == WM_LINKUP_TIMEOUT)
   12472 				wm_check_for_link(sc);
   12473 			/* Link is down. */
   12474 			DPRINTF(sc, WM_DEBUG_LINK,
   12475 			    ("%s: LINK: set media -> link down\n",
   12476 				device_xname(sc->sc_dev)));
   12477 			sc->sc_tbi_linkup = 0;
   12478 		}
   12479 	} else {
		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: LINK: set media -> no signal\n",
			device_xname(sc->sc_dev)));
   12482 		sc->sc_tbi_linkup = 0;
   12483 	}
   12484 
   12485 	wm_tbi_serdes_set_linkled(sc);
   12486 
   12487 	return 0;
   12488 }
   12489 
   12490 /*
   12491  * wm_tbi_mediastatus:	[ifmedia interface function]
   12492  *
   12493  *	Get the current interface media status on a 1000BASE-X device.
   12494  */
   12495 static void
   12496 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12497 {
   12498 	struct wm_softc *sc = ifp->if_softc;
   12499 	uint32_t ctrl, status;
   12500 
   12501 	ifmr->ifm_status = IFM_AVALID;
   12502 	ifmr->ifm_active = IFM_ETHER;
   12503 
   12504 	status = CSR_READ(sc, WMREG_STATUS);
   12505 	if ((status & STATUS_LU) == 0) {
   12506 		ifmr->ifm_active |= IFM_NONE;
   12507 		return;
   12508 	}
   12509 
   12510 	ifmr->ifm_status |= IFM_ACTIVE;
   12511 	/* Only 82545 is LX */
   12512 	if (sc->sc_type == WM_T_82545)
   12513 		ifmr->ifm_active |= IFM_1000_LX;
   12514 	else
   12515 		ifmr->ifm_active |= IFM_1000_SX;
   12516 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12517 		ifmr->ifm_active |= IFM_FDX;
   12518 	else
   12519 		ifmr->ifm_active |= IFM_HDX;
   12520 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12521 	if (ctrl & CTRL_RFCE)
   12522 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12523 	if (ctrl & CTRL_TFCE)
   12524 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12525 }
   12526 
   12527 /* XXX TBI only */
   12528 static int
   12529 wm_check_for_link(struct wm_softc *sc)
   12530 {
   12531 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12532 	uint32_t rxcw;
   12533 	uint32_t ctrl;
   12534 	uint32_t status;
   12535 	bool signal;
   12536 
   12537 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12538 		device_xname(sc->sc_dev), __func__));
   12539 
   12540 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12541 		/* XXX need some work for >= 82571 */
   12542 		if (sc->sc_type >= WM_T_82571) {
   12543 			sc->sc_tbi_linkup = 1;
   12544 			return 0;
   12545 		}
   12546 	}
   12547 
   12548 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12549 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12550 	status = CSR_READ(sc, WMREG_STATUS);
   12551 	signal = wm_tbi_havesignal(sc, ctrl);
   12552 
   12553 	DPRINTF(sc, WM_DEBUG_LINK,
   12554 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12555 		device_xname(sc->sc_dev), __func__, signal,
   12556 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12557 
   12558 	/*
   12559 	 * SWDPIN   LU RXCW
   12560 	 *	0    0	  0
   12561 	 *	0    0	  1	(should not happen)
   12562 	 *	0    1	  0	(should not happen)
   12563 	 *	0    1	  1	(should not happen)
   12564 	 *	1    0	  0	Disable autonego and force linkup
   12565 	 *	1    0	  1	got /C/ but not linkup yet
   12566 	 *	1    1	  0	(linkup)
   12567 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12568 	 *
   12569 	 */
   12570 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12571 		DPRINTF(sc, WM_DEBUG_LINK,
   12572 		    ("%s: %s: force linkup and fullduplex\n",
   12573 			device_xname(sc->sc_dev), __func__));
   12574 		sc->sc_tbi_linkup = 0;
   12575 		/* Disable auto-negotiation in the TXCW register */
   12576 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12577 
   12578 		/*
   12579 		 * Force link-up and also force full-duplex.
   12580 		 *
		 * NOTE: CTRL will update TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl.
   12583 		 */
   12584 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12585 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12586 	} else if (((status & STATUS_LU) != 0)
   12587 	    && ((rxcw & RXCW_C) != 0)
   12588 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12589 		sc->sc_tbi_linkup = 1;
   12590 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12591 			device_xname(sc->sc_dev),
   12592 			__func__));
   12593 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12594 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12595 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12597 			device_xname(sc->sc_dev), __func__));
   12598 	} else {
   12599 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12600 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12601 			status));
   12602 	}
   12603 
   12604 	return 0;
   12605 }
   12606 
   12607 /*
   12608  * wm_tbi_tick:
   12609  *
   12610  *	Check the link on TBI devices.
   12611  *	This function acts as mii_tick().
   12612  */
   12613 static void
   12614 wm_tbi_tick(struct wm_softc *sc)
   12615 {
   12616 	struct mii_data *mii = &sc->sc_mii;
   12617 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12618 	uint32_t status;
   12619 
   12620 	KASSERT(WM_CORE_LOCKED(sc));
   12621 
   12622 	status = CSR_READ(sc, WMREG_STATUS);
   12623 
   12624 	/* XXX is this needed? */
   12625 	(void)CSR_READ(sc, WMREG_RXCW);
   12626 	(void)CSR_READ(sc, WMREG_CTRL);
   12627 
   12628 	/* set link status */
   12629 	if ((status & STATUS_LU) == 0) {
   12630 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12631 			device_xname(sc->sc_dev)));
   12632 		sc->sc_tbi_linkup = 0;
   12633 	} else if (sc->sc_tbi_linkup == 0) {
   12634 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12635 			device_xname(sc->sc_dev),
   12636 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12637 		sc->sc_tbi_linkup = 1;
   12638 		sc->sc_tbi_serdes_ticks = 0;
   12639 	}
   12640 
   12641 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12642 		goto setled;
   12643 
   12644 	if ((status & STATUS_LU) == 0) {
   12645 		sc->sc_tbi_linkup = 0;
   12646 		/* If the timer expired, retry autonegotiation */
   12647 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12648 		    && (++sc->sc_tbi_serdes_ticks
   12649 			>= sc->sc_tbi_serdes_anegticks)) {
   12650 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12651 				device_xname(sc->sc_dev), __func__));
   12652 			sc->sc_tbi_serdes_ticks = 0;
   12653 			/*
   12654 			 * Reset the link, and let autonegotiation do
   12655 			 * its thing
   12656 			 */
   12657 			sc->sc_ctrl |= CTRL_LRST;
   12658 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12659 			CSR_WRITE_FLUSH(sc);
   12660 			delay(1000);
   12661 			sc->sc_ctrl &= ~CTRL_LRST;
   12662 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12663 			CSR_WRITE_FLUSH(sc);
   12664 			delay(1000);
   12665 			CSR_WRITE(sc, WMREG_TXCW,
   12666 			    sc->sc_txcw & ~TXCW_ANE);
   12667 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12668 		}
   12669 	}
   12670 
   12671 setled:
   12672 	wm_tbi_serdes_set_linkled(sc);
   12673 }
   12674 
   12675 /* SERDES related */
   12676 static void
   12677 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12678 {
   12679 	uint32_t reg;
   12680 
   12681 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12682 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12683 		return;
   12684 
   12685 	/* Enable PCS to turn on link */
   12686 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12687 	reg |= PCS_CFG_PCS_EN;
   12688 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12689 
   12690 	/* Power up the laser */
   12691 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12692 	reg &= ~CTRL_EXT_SWDPIN(3);
   12693 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12694 
   12695 	/* Flush the write to verify completion */
   12696 	CSR_WRITE_FLUSH(sc);
   12697 	delay(1000);
   12698 }
   12699 
   12700 static int
   12701 wm_serdes_mediachange(struct ifnet *ifp)
   12702 {
   12703 	struct wm_softc *sc = ifp->if_softc;
   12704 	bool pcs_autoneg = true; /* XXX */
   12705 	uint32_t ctrl_ext, pcs_lctl, reg;
   12706 
   12707 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12708 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12709 		return 0;
   12710 
   12711 	/* XXX Currently, this function is not called on 8257[12] */
   12712 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12713 	    || (sc->sc_type >= WM_T_82575))
   12714 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12715 
   12716 	/* Power on the sfp cage if present */
   12717 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12718 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12719 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12720 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12721 
   12722 	sc->sc_ctrl |= CTRL_SLU;
   12723 
   12724 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12725 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12726 
   12727 		reg = CSR_READ(sc, WMREG_CONNSW);
   12728 		reg |= CONNSW_ENRGSRC;
   12729 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12730 	}
   12731 
   12732 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12733 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12734 	case CTRL_EXT_LINK_MODE_SGMII:
   12735 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12736 		pcs_autoneg = true;
   12737 		/* Autoneg time out should be disabled for SGMII mode */
   12738 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12739 		break;
   12740 	case CTRL_EXT_LINK_MODE_1000KX:
   12741 		pcs_autoneg = false;
   12742 		/* FALLTHROUGH */
   12743 	default:
   12744 		if ((sc->sc_type == WM_T_82575)
   12745 		    || (sc->sc_type == WM_T_82576)) {
   12746 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12747 				pcs_autoneg = false;
   12748 		}
   12749 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12750 		    | CTRL_FRCFDX;
   12751 
   12752 		/* Set speed of 1000/Full if speed/duplex is forced */
   12753 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12754 	}
   12755 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12756 
   12757 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12758 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12759 
   12760 	if (pcs_autoneg) {
   12761 		/* Set PCS register for autoneg */
   12762 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12763 
   12764 		/* Disable force flow control for autoneg */
   12765 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12766 
   12767 		/* Configure flow control advertisement for autoneg */
   12768 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12769 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12770 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12771 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12772 	} else
   12773 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12774 
   12775 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12776 
   12777 	return 0;
   12778 }
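
/*
 * Editorial sketch (never compiled): ignoring the CTRL speed/duplex
 * forcing, the flow-control advertisement and the SGMII autoneg-timeout
 * details, the PCS_LCTL programming above reduces to this pure function.
 * wm_pcs_lctl_for_mode() is a hypothetical name, not a driver API.
 */
#if 0
static uint32_t
wm_pcs_lctl_for_mode(uint32_t link_mode, bool dis_autonego, uint32_t pcs_lctl)
{

	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
	switch (link_mode) {
	case CTRL_EXT_LINK_MODE_SGMII:
		/* The attached PHY autonegotiates; the PCS follows it. */
		return pcs_lctl | PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
	case CTRL_EXT_LINK_MODE_1000KX:
		/* Force 1000/full on the PCS itself. */
		return pcs_lctl | PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL
		    | PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
	default:
		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
		if (dis_autonego)	/* WM_F_PCS_DIS_AUTONEGO, 8257[56] */
			return pcs_lctl | PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
		return pcs_lctl | PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
	}
}
#endif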
   12779 
   12780 static void
   12781 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12782 {
   12783 	struct wm_softc *sc = ifp->if_softc;
   12784 	struct mii_data *mii = &sc->sc_mii;
   12785 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12786 	uint32_t pcs_adv, pcs_lpab, reg;
   12787 
   12788 	ifmr->ifm_status = IFM_AVALID;
   12789 	ifmr->ifm_active = IFM_ETHER;
   12790 
   12791 	/* Check PCS */
   12792 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12793 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12794 		ifmr->ifm_active |= IFM_NONE;
   12795 		sc->sc_tbi_linkup = 0;
   12796 		goto setled;
   12797 	}
   12798 
   12799 	sc->sc_tbi_linkup = 1;
   12800 	ifmr->ifm_status |= IFM_ACTIVE;
   12801 	if (sc->sc_type == WM_T_I354) {
   12802 		uint32_t status;
   12803 
   12804 		status = CSR_READ(sc, WMREG_STATUS);
   12805 		if (((status & STATUS_2P5_SKU) != 0)
   12806 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12807 			ifmr->ifm_active |= IFM_2500_KX;
   12808 		} else
   12809 			ifmr->ifm_active |= IFM_1000_KX;
   12810 	} else {
   12811 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12812 		case PCS_LSTS_SPEED_10:
   12813 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12814 			break;
   12815 		case PCS_LSTS_SPEED_100:
   12816 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12817 			break;
   12818 		case PCS_LSTS_SPEED_1000:
   12819 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12820 			break;
   12821 		default:
   12822 			device_printf(sc->sc_dev, "Unknown speed\n");
   12823 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12824 			break;
   12825 		}
   12826 	}
   12827 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12828 	if ((reg & PCS_LSTS_FDX) != 0)
   12829 		ifmr->ifm_active |= IFM_FDX;
   12830 	else
   12831 		ifmr->ifm_active |= IFM_HDX;
   12832 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12833 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12834 		/* Check flow */
   12835 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12836 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12837 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12838 			goto setled;
   12839 		}
   12840 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12841 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12842 		DPRINTF(sc, WM_DEBUG_LINK,
   12843 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12844 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12845 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12846 			mii->mii_media_active |= IFM_FLOW
   12847 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12848 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12849 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12850 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12851 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12852 			mii->mii_media_active |= IFM_FLOW
   12853 			    | IFM_ETH_TXPAUSE;
   12854 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12855 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12856 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12857 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12858 			mii->mii_media_active |= IFM_FLOW
   12859 			    | IFM_ETH_RXPAUSE;
   12860 		}
   12861 	}
   12862 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12863 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12864 setled:
   12865 	wm_tbi_serdes_set_linkled(sc);
   12866 }
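
/*
 * Editorial sketch (never compiled): the three-way test above is the
 * standard IEEE 802.3 clause 28B.3 pause resolution.  wm_resolve_flow()
 * is a hypothetical helper; "adv" and "lp" are the local advertisement
 * and link partner ability words in TXCW bit layout.
 */
#if 0
static int
wm_resolve_flow(uint32_t adv, uint32_t lp)
{
	int flow = 0;

	if ((adv & TXCW_SYM_PAUSE) && (lp & TXCW_SYM_PAUSE))
		flow = IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	else if (((adv & TXCW_SYM_PAUSE) == 0) && (adv & TXCW_ASYM_PAUSE)
	    && (lp & TXCW_SYM_PAUSE) && (lp & TXCW_ASYM_PAUSE))
		flow = IFM_FLOW | IFM_ETH_TXPAUSE; /* We may send PAUSE */
	else if ((adv & TXCW_SYM_PAUSE) && (adv & TXCW_ASYM_PAUSE)
	    && ((lp & TXCW_SYM_PAUSE) == 0) && (lp & TXCW_ASYM_PAUSE))
		flow = IFM_FLOW | IFM_ETH_RXPAUSE; /* We honor their PAUSE */
	return flow;
}
#endif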
   12867 
   12868 /*
   12869  * wm_serdes_tick:
   12870  *
   12871  *	Check the link on serdes devices.
   12872  */
   12873 static void
   12874 wm_serdes_tick(struct wm_softc *sc)
   12875 {
   12876 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12877 	struct mii_data *mii = &sc->sc_mii;
   12878 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12879 	uint32_t reg;
   12880 
   12881 	KASSERT(WM_CORE_LOCKED(sc));
   12882 
   12883 	mii->mii_media_status = IFM_AVALID;
   12884 	mii->mii_media_active = IFM_ETHER;
   12885 
   12886 	/* Check PCS */
   12887 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12888 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12889 		mii->mii_media_status |= IFM_ACTIVE;
   12890 		sc->sc_tbi_linkup = 1;
   12891 		sc->sc_tbi_serdes_ticks = 0;
   12892 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12893 		if ((reg & PCS_LSTS_FDX) != 0)
   12894 			mii->mii_media_active |= IFM_FDX;
   12895 		else
   12896 			mii->mii_media_active |= IFM_HDX;
   12897 	} else {
   12898 		mii->mii_media_status |= IFM_NONE;
   12899 		sc->sc_tbi_linkup = 0;
   12900 		/* If the timer expired, retry autonegotiation */
   12901 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12902 		    && (++sc->sc_tbi_serdes_ticks
   12903 			>= sc->sc_tbi_serdes_anegticks)) {
   12904 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12905 				device_xname(sc->sc_dev), __func__));
   12906 			sc->sc_tbi_serdes_ticks = 0;
   12907 			/* XXX */
   12908 			wm_serdes_mediachange(ifp);
   12909 		}
   12910 	}
   12911 
   12912 	wm_tbi_serdes_set_linkled(sc);
   12913 }
   12914 
   12915 /* SFP related */
   12916 
   12917 static int
   12918 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12919 {
   12920 	uint32_t i2ccmd;
   12921 	int i;
   12922 
   12923 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12924 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12925 
   12926 	/* Poll the ready bit */
   12927 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12928 		delay(50);
   12929 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12930 		if (i2ccmd & I2CCMD_READY)
   12931 			break;
   12932 	}
   12933 	if ((i2ccmd & I2CCMD_READY) == 0)
   12934 		return -1;
   12935 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12936 		return -1;
   12937 
   12938 	*data = i2ccmd & 0x00ff;
   12939 
   12940 	return 0;
   12941 }
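
/*
 * Editorial usage sketch (never compiled): reading the SFF identifier
 * byte through the helper above, from code where "sc" is in scope.
 */
#if 0
	uint8_t id;

	if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &id) == 0)
		aprint_debug_dev(sc->sc_dev, "SFP ID byte = 0x%02x\n", id);
#endif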
   12942 
   12943 static uint32_t
   12944 wm_sfp_get_media_type(struct wm_softc *sc)
   12945 {
   12946 	uint32_t ctrl_ext;
   12947 	uint8_t val = 0;
   12948 	int timeout = 3;
   12949 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12950 	int rv = -1;
   12951 
   12952 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12953 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12954 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12955 	CSR_WRITE_FLUSH(sc);
   12956 
   12957 	/* Read SFP module data */
   12958 	while (timeout) {
   12959 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12960 		if (rv == 0)
   12961 			break;
   12962 		delay(100*1000); /* XXX too big */
   12963 		timeout--;
   12964 	}
   12965 	if (rv != 0)
   12966 		goto out;
   12967 
   12968 	switch (val) {
   12969 	case SFF_SFP_ID_SFF:
   12970 		aprint_normal_dev(sc->sc_dev,
   12971 		    "Module/Connector soldered to board\n");
   12972 		break;
   12973 	case SFF_SFP_ID_SFP:
   12974 		sc->sc_flags |= WM_F_SFP;
   12975 		break;
   12976 	case SFF_SFP_ID_UNKNOWN:
   12977 		goto out;
   12978 	default:
   12979 		break;
   12980 	}
   12981 
   12982 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12983 	if (rv != 0)
   12984 		goto out;
   12985 
   12986 	sc->sc_sfptype = val;
   12987 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12988 		mediatype = WM_MEDIATYPE_SERDES;
   12989 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12990 		sc->sc_flags |= WM_F_SGMII;
   12991 		mediatype = WM_MEDIATYPE_COPPER;
   12992 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12993 		sc->sc_flags |= WM_F_SGMII;
   12994 		mediatype = WM_MEDIATYPE_SERDES;
   12995 	} else {
   12996 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12997 		    __func__, sc->sc_sfptype);
   12998 		sc->sc_sfptype = 0; /* XXX unknown */
   12999 	}
   13000 
   13001 out:
   13002 	/* Restore I2C interface setting */
   13003 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13004 
   13005 	return mediatype;
   13006 }
   13007 
   13008 /*
   13009  * NVM related.
   13010  * Microwire, SPI (w/wo EERD) and Flash.
   13011  */
   13012 
   13013 /* Both spi and uwire */
   13014 
   13015 /*
   13016  * wm_eeprom_sendbits:
   13017  *
   13018  *	Send a series of bits to the EEPROM.
   13019  */
   13020 static void
   13021 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13022 {
   13023 	uint32_t reg;
   13024 	int x;
   13025 
   13026 	reg = CSR_READ(sc, WMREG_EECD);
   13027 
   13028 	for (x = nbits; x > 0; x--) {
   13029 		if (bits & (1U << (x - 1)))
   13030 			reg |= EECD_DI;
   13031 		else
   13032 			reg &= ~EECD_DI;
   13033 		CSR_WRITE(sc, WMREG_EECD, reg);
   13034 		CSR_WRITE_FLUSH(sc);
   13035 		delay(2);
   13036 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13037 		CSR_WRITE_FLUSH(sc);
   13038 		delay(2);
   13039 		CSR_WRITE(sc, WMREG_EECD, reg);
   13040 		CSR_WRITE_FLUSH(sc);
   13041 		delay(2);
   13042 	}
   13043 }
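
/*
 * Editorial sketch (never compiled): the loop above is a plain MSB-first
 * shift; stripped of the register I/O it is just this.
 * shiftout_msb_first() is a hypothetical name.
 */
#if 0
static void
shiftout_msb_first(uint32_t bits, int nbits, void (*out)(int))
{
	int x;

	/* E.g. bits = 0x6 (0b110), nbits = 3 emits 1, 1, 0. */
	for (x = nbits; x > 0; x--)
		out((bits >> (x - 1)) & 1);
}
#endif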
   13044 
   13045 /*
   13046  * wm_eeprom_recvbits:
   13047  *
   13048  *	Receive a series of bits from the EEPROM.
   13049  */
   13050 static void
   13051 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13052 {
   13053 	uint32_t reg, val;
   13054 	int x;
   13055 
   13056 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13057 
   13058 	val = 0;
   13059 	for (x = nbits; x > 0; x--) {
   13060 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13061 		CSR_WRITE_FLUSH(sc);
   13062 		delay(2);
   13063 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13064 			val |= (1U << (x - 1));
   13065 		CSR_WRITE(sc, WMREG_EECD, reg);
   13066 		CSR_WRITE_FLUSH(sc);
   13067 		delay(2);
   13068 	}
   13069 	*valp = val;
   13070 }
   13071 
   13072 /* Microwire */
   13073 
   13074 /*
   13075  * wm_nvm_read_uwire:
   13076  *
   13077  *	Read a word from the EEPROM using the MicroWire protocol.
   13078  */
   13079 static int
   13080 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13081 {
   13082 	uint32_t reg, val;
   13083 	int i;
   13084 
   13085 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13086 		device_xname(sc->sc_dev), __func__));
   13087 
   13088 	if (sc->nvm.acquire(sc) != 0)
   13089 		return -1;
   13090 
   13091 	for (i = 0; i < wordcnt; i++) {
   13092 		/* Clear SK and DI. */
   13093 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13094 		CSR_WRITE(sc, WMREG_EECD, reg);
   13095 
   13096 		/*
   13097 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13098 		 * and Xen.
   13099 		 *
   13100 		 * We use this workaround only for 82540 because qemu's
    13101 		 * e1000 acts as an 82540.
   13102 		 */
   13103 		if (sc->sc_type == WM_T_82540) {
   13104 			reg |= EECD_SK;
   13105 			CSR_WRITE(sc, WMREG_EECD, reg);
   13106 			reg &= ~EECD_SK;
   13107 			CSR_WRITE(sc, WMREG_EECD, reg);
   13108 			CSR_WRITE_FLUSH(sc);
   13109 			delay(2);
   13110 		}
   13111 		/* XXX: end of workaround */
   13112 
   13113 		/* Set CHIP SELECT. */
   13114 		reg |= EECD_CS;
   13115 		CSR_WRITE(sc, WMREG_EECD, reg);
   13116 		CSR_WRITE_FLUSH(sc);
   13117 		delay(2);
   13118 
   13119 		/* Shift in the READ command. */
   13120 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13121 
   13122 		/* Shift in address. */
   13123 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13124 
   13125 		/* Shift out the data. */
   13126 		wm_eeprom_recvbits(sc, &val, 16);
   13127 		data[i] = val & 0xffff;
   13128 
   13129 		/* Clear CHIP SELECT. */
   13130 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13131 		CSR_WRITE(sc, WMREG_EECD, reg);
   13132 		CSR_WRITE_FLUSH(sc);
   13133 		delay(2);
   13134 	}
   13135 
   13136 	sc->nvm.release(sc);
   13137 	return 0;
   13138 }
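
/*
 * Editorial note: a full Microwire READ transaction as clocked above,
 * assuming the conventional 93Cxx encoding where UWIRE_OPC_READ is 0b110
 * (start bit plus the "10" opcode): with CS high, shift out the three
 * opcode bits, then sc_nvm_addrbits address bits MSB first (e.g.
 * 0b000101 for word 5 on a 64-word part), then clock in 16 data bits.
 */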
   13139 
   13140 /* SPI */
   13141 
   13142 /*
   13143  * Set SPI and FLASH related information from the EECD register.
   13144  * For 82541 and 82547, the word size is taken from EEPROM.
   13145  */
   13146 static int
   13147 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13148 {
   13149 	int size;
   13150 	uint32_t reg;
   13151 	uint16_t data;
   13152 
   13153 	reg = CSR_READ(sc, WMREG_EECD);
   13154 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13155 
   13156 	/* Read the size of NVM from EECD by default */
   13157 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13158 	switch (sc->sc_type) {
   13159 	case WM_T_82541:
   13160 	case WM_T_82541_2:
   13161 	case WM_T_82547:
   13162 	case WM_T_82547_2:
   13163 		/* Set dummy value to access EEPROM */
   13164 		sc->sc_nvm_wordsize = 64;
   13165 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13166 			aprint_error_dev(sc->sc_dev,
   13167 			    "%s: failed to read EEPROM size\n", __func__);
   13168 		}
   13169 		reg = data;
   13170 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13171 		if (size == 0)
   13172 			size = 6; /* 64 word size */
   13173 		else
   13174 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13175 		break;
   13176 	case WM_T_80003:
   13177 	case WM_T_82571:
   13178 	case WM_T_82572:
   13179 	case WM_T_82573: /* SPI case */
   13180 	case WM_T_82574: /* SPI case */
   13181 	case WM_T_82583: /* SPI case */
   13182 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13183 		if (size > 14)
   13184 			size = 14;
   13185 		break;
   13186 	case WM_T_82575:
   13187 	case WM_T_82576:
   13188 	case WM_T_82580:
   13189 	case WM_T_I350:
   13190 	case WM_T_I354:
   13191 	case WM_T_I210:
   13192 	case WM_T_I211:
   13193 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13194 		if (size > 15)
   13195 			size = 15;
   13196 		break;
   13197 	default:
   13198 		aprint_error_dev(sc->sc_dev,
   13199 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13200 		return -1;
   13202 	}
   13203 
   13204 	sc->sc_nvm_wordsize = 1 << size;
   13205 
   13206 	return 0;
   13207 }
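
/*
 * Editorial worked example: with the usual e1000 value of
 * NVM_WORD_SIZE_BASE_SHIFT (6), an 82571 whose EECD size field reads 2
 * gets size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words
 * (512 bytes); the 82541/82547 fallback of size = 6 likewise means
 * 1 << 6 = 64 words.
 */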
   13208 
   13209 /*
   13210  * wm_nvm_ready_spi:
   13211  *
   13212  *	Wait for a SPI EEPROM to be ready for commands.
   13213  */
   13214 static int
   13215 wm_nvm_ready_spi(struct wm_softc *sc)
   13216 {
   13217 	uint32_t val;
   13218 	int usec;
   13219 
   13220 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13221 		device_xname(sc->sc_dev), __func__));
   13222 
   13223 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13224 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13225 		wm_eeprom_recvbits(sc, &val, 8);
   13226 		if ((val & SPI_SR_RDY) == 0)
   13227 			break;
   13228 	}
   13229 	if (usec >= SPI_MAX_RETRIES) {
    13230 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   13231 		return -1;
   13232 	}
   13233 	return 0;
   13234 }
   13235 
   13236 /*
   13237  * wm_nvm_read_spi:
   13238  *
    13239  *	Read a word from the EEPROM using the SPI protocol.
   13240  */
   13241 static int
   13242 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13243 {
   13244 	uint32_t reg, val;
   13245 	int i;
   13246 	uint8_t opc;
   13247 	int rv = 0;
   13248 
   13249 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13250 		device_xname(sc->sc_dev), __func__));
   13251 
   13252 	if (sc->nvm.acquire(sc) != 0)
   13253 		return -1;
   13254 
   13255 	/* Clear SK and CS. */
   13256 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13257 	CSR_WRITE(sc, WMREG_EECD, reg);
   13258 	CSR_WRITE_FLUSH(sc);
   13259 	delay(2);
   13260 
   13261 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13262 		goto out;
   13263 
   13264 	/* Toggle CS to flush commands. */
   13265 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13266 	CSR_WRITE_FLUSH(sc);
   13267 	delay(2);
   13268 	CSR_WRITE(sc, WMREG_EECD, reg);
   13269 	CSR_WRITE_FLUSH(sc);
   13270 	delay(2);
   13271 
   13272 	opc = SPI_OPC_READ;
   13273 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13274 		opc |= SPI_OPC_A8;
   13275 
   13276 	wm_eeprom_sendbits(sc, opc, 8);
   13277 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13278 
   13279 	for (i = 0; i < wordcnt; i++) {
   13280 		wm_eeprom_recvbits(sc, &val, 16);
   13281 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13282 	}
   13283 
   13284 	/* Raise CS and clear SK. */
   13285 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13286 	CSR_WRITE(sc, WMREG_EECD, reg);
   13287 	CSR_WRITE_FLUSH(sc);
   13288 	delay(2);
   13289 
   13290 out:
   13291 	sc->nvm.release(sc);
   13292 	return rv;
   13293 }
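
/*
 * Editorial worked example: on a part with 8 address bits, reading word
 * 0x90 means byte address 0x90 << 1 = 0x120.  Only the low eight bits
 * (0x20) fit in the address field, so the ninth bit travels in the
 * SPI_OPC_A8 opcode bit, which is why the opcode, not the address, is
 * patched for word >= 128 above.
 */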
   13294 
    13295 /* Reading via EERD */
   13296 
   13297 static int
   13298 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13299 {
   13300 	uint32_t attempts = 100000;
   13301 	uint32_t i, reg = 0;
   13302 	int32_t done = -1;
   13303 
   13304 	for (i = 0; i < attempts; i++) {
   13305 		reg = CSR_READ(sc, rw);
   13306 
   13307 		if (reg & EERD_DONE) {
   13308 			done = 0;
   13309 			break;
   13310 		}
   13311 		delay(5);
   13312 	}
   13313 
   13314 	return done;
   13315 }
   13316 
   13317 static int
   13318 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13319 {
   13320 	int i, eerd = 0;
   13321 	int rv = 0;
   13322 
   13323 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13324 		device_xname(sc->sc_dev), __func__));
   13325 
   13326 	if (sc->nvm.acquire(sc) != 0)
   13327 		return -1;
   13328 
   13329 	for (i = 0; i < wordcnt; i++) {
   13330 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13331 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13332 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13333 		if (rv != 0) {
   13334 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13335 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13336 			break;
   13337 		}
   13338 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13339 	}
   13340 
   13341 	sc->nvm.release(sc);
   13342 	return rv;
   13343 }
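
/*
 * Editorial usage sketch (never compiled): reading the three Ethernet
 * address words with the EERD-based reader above.  Real callers go
 * through sc->nvm.read(), which points here on EERD-capable parts.
 */
#if 0
	uint16_t myea[ETHER_ADDR_LEN / 2];

	if (wm_nvm_read_eerd(sc, NVM_OFF_MACADDR, __arraycount(myea),
	    myea) == 0)
		aprint_debug_dev(sc->sc_dev, "MAC words %04x %04x %04x\n",
		    myea[0], myea[1], myea[2]);
#endif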
   13344 
   13345 /* Flash */
   13346 
   13347 static int
   13348 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13349 {
   13350 	uint32_t eecd;
   13351 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13352 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13353 	uint32_t nvm_dword = 0;
   13354 	uint8_t sig_byte = 0;
   13355 	int rv;
   13356 
   13357 	switch (sc->sc_type) {
   13358 	case WM_T_PCH_SPT:
   13359 	case WM_T_PCH_CNP:
   13360 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13361 		act_offset = ICH_NVM_SIG_WORD * 2;
   13362 
   13363 		/* Set bank to 0 in case flash read fails. */
   13364 		*bank = 0;
   13365 
   13366 		/* Check bank 0 */
   13367 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13368 		if (rv != 0)
   13369 			return rv;
   13370 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13371 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13372 			*bank = 0;
   13373 			return 0;
   13374 		}
   13375 
   13376 		/* Check bank 1 */
   13377 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13378 		    &nvm_dword);
		if (rv != 0)
			return rv;
    13379 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13380 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13381 			*bank = 1;
   13382 			return 0;
   13383 		}
   13384 		aprint_error_dev(sc->sc_dev,
   13385 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13386 		return -1;
   13387 	case WM_T_ICH8:
   13388 	case WM_T_ICH9:
   13389 		eecd = CSR_READ(sc, WMREG_EECD);
   13390 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13391 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13392 			return 0;
   13393 		}
   13394 		/* FALLTHROUGH */
   13395 	default:
   13396 		/* Default to 0 */
   13397 		*bank = 0;
   13398 
   13399 		/* Check bank 0 */
   13400 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13401 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13402 			*bank = 0;
   13403 			return 0;
   13404 		}
   13405 
   13406 		/* Check bank 1 */
   13407 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13408 		    &sig_byte);
   13409 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13410 			*bank = 1;
   13411 			return 0;
   13412 		}
   13413 	}
   13414 
   13415 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13416 		device_xname(sc->sc_dev)));
   13417 	return -1;
   13418 }
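
/*
 * Editorial note: with the usual signature constants
 * (ICH_NVM_VALID_SIG_MASK = 0xc0, ICH_NVM_SIG_VALUE = 0x80), a bank is
 * accepted when the high byte of its signature word reads 0b10xxxxxx,
 * i.e. 0x80-0xbf; anything else sends the search on to the other bank.
 */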
   13419 
   13420 /******************************************************************************
   13421  * This function does initial flash setup so that a new read/write/erase cycle
   13422  * can be started.
   13423  *
   13424  * sc - The pointer to the hw structure
   13425  ****************************************************************************/
   13426 static int32_t
   13427 wm_ich8_cycle_init(struct wm_softc *sc)
   13428 {
   13429 	uint16_t hsfsts;
   13430 	int32_t error = 1;
   13431 	int32_t i     = 0;
   13432 
   13433 	if (sc->sc_type >= WM_T_PCH_SPT)
   13434 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13435 	else
   13436 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13437 
    13438 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   13439 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13440 		return error;
   13441 
    13442 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   13444 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13445 
   13446 	if (sc->sc_type >= WM_T_PCH_SPT)
   13447 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13448 	else
   13449 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13450 
    13451 	/*
    13452 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13453 	 * check against before starting a new cycle, or the FDONE bit
    13454 	 * should be changed in the hardware so that it reads 1 after a
    13455 	 * hardware reset, which could then be used to tell whether a cycle
    13456 	 * is in progress or has completed.  We should also have some
    13457 	 * software semaphore mechanism to guard FDONE or the
    13458 	 * cycle-in-progress bit so that accesses by two threads are
    13459 	 * serialized and two threads don't start a cycle at the same time.
    13460 	 */
   13461 
   13462 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13463 		/*
   13464 		 * There is no cycle running at present, so we can start a
   13465 		 * cycle
   13466 		 */
   13467 
   13468 		/* Begin by setting Flash Cycle Done. */
   13469 		hsfsts |= HSFSTS_DONE;
   13470 		if (sc->sc_type >= WM_T_PCH_SPT)
   13471 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13472 			    hsfsts & 0xffffUL);
   13473 		else
   13474 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13475 		error = 0;
   13476 	} else {
   13477 		/*
   13478 		 * Otherwise poll for sometime so the current cycle has a
   13479 		 * chance to end before giving up.
   13480 		 */
   13481 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13482 			if (sc->sc_type >= WM_T_PCH_SPT)
   13483 				hsfsts = ICH8_FLASH_READ32(sc,
   13484 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13485 			else
   13486 				hsfsts = ICH8_FLASH_READ16(sc,
   13487 				    ICH_FLASH_HSFSTS);
   13488 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13489 				error = 0;
   13490 				break;
   13491 			}
   13492 			delay(1);
   13493 		}
   13494 		if (error == 0) {
   13495 			/*
    13496 			 * The previous cycle finished before the timeout;
    13497 			 * now set the Flash Cycle Done bit.
   13498 			 */
   13499 			hsfsts |= HSFSTS_DONE;
   13500 			if (sc->sc_type >= WM_T_PCH_SPT)
   13501 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13502 				    hsfsts & 0xffffUL);
   13503 			else
   13504 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13505 				    hsfsts);
   13506 		}
   13507 	}
   13508 	return error;
   13509 }
   13510 
   13511 /******************************************************************************
   13512  * This function starts a flash cycle and waits for its completion
   13513  *
   13514  * sc - The pointer to the hw structure
   13515  ****************************************************************************/
   13516 static int32_t
   13517 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13518 {
   13519 	uint16_t hsflctl;
   13520 	uint16_t hsfsts;
   13521 	int32_t error = 1;
   13522 	uint32_t i = 0;
   13523 
   13524 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13525 	if (sc->sc_type >= WM_T_PCH_SPT)
   13526 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13527 	else
   13528 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13529 	hsflctl |= HSFCTL_GO;
   13530 	if (sc->sc_type >= WM_T_PCH_SPT)
   13531 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13532 		    (uint32_t)hsflctl << 16);
   13533 	else
   13534 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13535 
   13536 	/* Wait till FDONE bit is set to 1 */
   13537 	do {
   13538 		if (sc->sc_type >= WM_T_PCH_SPT)
   13539 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13540 			    & 0xffffUL;
   13541 		else
   13542 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13543 		if (hsfsts & HSFSTS_DONE)
   13544 			break;
   13545 		delay(1);
   13546 		i++;
   13547 	} while (i < timeout);
    13548 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13549 		error = 0;
   13550 
   13551 	return error;
   13552 }
   13553 
   13554 /******************************************************************************
   13555  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13556  *
   13557  * sc - The pointer to the hw structure
   13558  * index - The index of the byte or word to read.
   13559  * size - Size of data to read, 1=byte 2=word, 4=dword
   13560  * data - Pointer to the word to store the value read.
   13561  *****************************************************************************/
   13562 static int32_t
   13563 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13564     uint32_t size, uint32_t *data)
   13565 {
   13566 	uint16_t hsfsts;
   13567 	uint16_t hsflctl;
   13568 	uint32_t flash_linear_address;
   13569 	uint32_t flash_data = 0;
   13570 	int32_t error = 1;
   13571 	int32_t count = 0;
   13572 
    13573 	if (size < 1 || size > 4 || data == NULL ||
   13574 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13575 		return error;
   13576 
   13577 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13578 	    sc->sc_ich8_flash_base;
   13579 
   13580 	do {
   13581 		delay(1);
   13582 		/* Steps */
   13583 		error = wm_ich8_cycle_init(sc);
   13584 		if (error)
   13585 			break;
   13586 
   13587 		if (sc->sc_type >= WM_T_PCH_SPT)
   13588 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13589 			    >> 16;
   13590 		else
   13591 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13592 		/* The byte count field holds size - 1: 0=1 byte, 1=2, 3=4. */
   13593 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13594 		    & HSFCTL_BCOUNT_MASK;
   13595 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13596 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13597 			/*
    13598 			 * In SPT, this register is in the LAN memory space,
    13599 			 * not flash, so only 32-bit access is supported.
   13600 			 */
   13601 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13602 			    (uint32_t)hsflctl << 16);
   13603 		} else
   13604 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13605 
   13606 		/*
   13607 		 * Write the last 24 bits of index into Flash Linear address
   13608 		 * field in Flash Address
   13609 		 */
   13610 		/* TODO: TBD maybe check the index against the size of flash */
   13611 
   13612 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13613 
   13614 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13615 
    13616 		/*
    13617 		 * If FCERR is set, clear it and retry the whole sequence
    13618 		 * a few more times; otherwise read the data out of the
    13619 		 * Flash Data0 register, least significant byte first.
    13620 		 */
   13622 		if (error == 0) {
   13623 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13624 			if (size == 1)
   13625 				*data = (uint8_t)(flash_data & 0x000000FF);
   13626 			else if (size == 2)
   13627 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13628 			else if (size == 4)
   13629 				*data = (uint32_t)flash_data;
   13630 			break;
   13631 		} else {
   13632 			/*
   13633 			 * If we've gotten here, then things are probably
   13634 			 * completely hosed, but if the error condition is
   13635 			 * detected, it won't hurt to give it another try...
   13636 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13637 			 */
   13638 			if (sc->sc_type >= WM_T_PCH_SPT)
   13639 				hsfsts = ICH8_FLASH_READ32(sc,
   13640 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13641 			else
   13642 				hsfsts = ICH8_FLASH_READ16(sc,
   13643 				    ICH_FLASH_HSFSTS);
   13644 
   13645 			if (hsfsts & HSFSTS_ERR) {
   13646 				/* Repeat for some time before giving up. */
   13647 				continue;
   13648 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13649 				break;
   13650 		}
   13651 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13652 
   13653 	return error;
   13654 }
   13655 
   13656 /******************************************************************************
   13657  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13658  *
   13659  * sc - pointer to wm_hw structure
   13660  * index - The index of the byte to read.
   13661  * data - Pointer to a byte to store the value read.
   13662  *****************************************************************************/
   13663 static int32_t
   13664 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13665 {
   13666 	int32_t status;
   13667 	uint32_t word = 0;
   13668 
   13669 	status = wm_read_ich8_data(sc, index, 1, &word);
   13670 	if (status == 0)
   13671 		*data = (uint8_t)word;
   13672 	else
   13673 		*data = 0;
   13674 
   13675 	return status;
   13676 }
   13677 
   13678 /******************************************************************************
   13679  * Reads a word from the NVM using the ICH8 flash access registers.
   13680  *
   13681  * sc - pointer to wm_hw structure
   13682  * index - The starting byte index of the word to read.
   13683  * data - Pointer to a word to store the value read.
   13684  *****************************************************************************/
   13685 static int32_t
   13686 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13687 {
   13688 	int32_t status;
   13689 	uint32_t word = 0;
   13690 
   13691 	status = wm_read_ich8_data(sc, index, 2, &word);
   13692 	if (status == 0)
   13693 		*data = (uint16_t)word;
   13694 	else
   13695 		*data = 0;
   13696 
   13697 	return status;
   13698 }
   13699 
   13700 /******************************************************************************
   13701  * Reads a dword from the NVM using the ICH8 flash access registers.
   13702  *
   13703  * sc - pointer to wm_hw structure
   13704  * index - The starting byte index of the word to read.
   13705  * data - Pointer to a word to store the value read.
   13706  *****************************************************************************/
   13707 static int32_t
   13708 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13709 {
   13710 	int32_t status;
   13711 
   13712 	status = wm_read_ich8_data(sc, index, 4, data);
   13713 	return status;
   13714 }
   13715 
   13716 /******************************************************************************
   13717  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13718  * register.
   13719  *
   13720  * sc - Struct containing variables accessed by shared code
   13721  * offset - offset of word in the EEPROM to read
   13722  * data - word read from the EEPROM
   13723  * words - number of words to read
   13724  *****************************************************************************/
   13725 static int
   13726 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13727 {
   13728 	int32_t	 rv = 0;
   13729 	uint32_t flash_bank = 0;
   13730 	uint32_t act_offset = 0;
   13731 	uint32_t bank_offset = 0;
   13732 	uint16_t word = 0;
   13733 	uint16_t i = 0;
   13734 
   13735 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13736 		device_xname(sc->sc_dev), __func__));
   13737 
   13738 	if (sc->nvm.acquire(sc) != 0)
   13739 		return -1;
   13740 
   13741 	/*
   13742 	 * We need to know which is the valid flash bank.  In the event
   13743 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13744 	 * managing flash_bank. So it cannot be trusted and needs
   13745 	 * to be updated with each read.
   13746 	 */
   13747 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13748 	if (rv) {
   13749 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13750 			device_xname(sc->sc_dev)));
   13751 		flash_bank = 0;
   13752 	}
   13753 
   13754 	/*
   13755 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13756 	 * size
   13757 	 */
   13758 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13759 
   13760 	for (i = 0; i < words; i++) {
   13761 		/* The NVM part needs a byte offset, hence * 2 */
   13762 		act_offset = bank_offset + ((offset + i) * 2);
   13763 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13764 		if (rv) {
   13765 			aprint_error_dev(sc->sc_dev,
   13766 			    "%s: failed to read NVM\n", __func__);
   13767 			break;
   13768 		}
   13769 		data[i] = word;
   13770 	}
   13771 
   13772 	sc->nvm.release(sc);
   13773 	return rv;
   13774 }
   13775 
   13776 /******************************************************************************
   13777  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13778  * register.
   13779  *
   13780  * sc - Struct containing variables accessed by shared code
   13781  * offset - offset of word in the EEPROM to read
   13782  * data - word read from the EEPROM
   13783  * words - number of words to read
   13784  *****************************************************************************/
   13785 static int
   13786 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13787 {
   13788 	int32_t	 rv = 0;
   13789 	uint32_t flash_bank = 0;
   13790 	uint32_t act_offset = 0;
   13791 	uint32_t bank_offset = 0;
   13792 	uint32_t dword = 0;
   13793 	uint16_t i = 0;
   13794 
   13795 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13796 		device_xname(sc->sc_dev), __func__));
   13797 
   13798 	if (sc->nvm.acquire(sc) != 0)
   13799 		return -1;
   13800 
   13801 	/*
   13802 	 * We need to know which is the valid flash bank.  In the event
   13803 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13804 	 * managing flash_bank. So it cannot be trusted and needs
   13805 	 * to be updated with each read.
   13806 	 */
   13807 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13808 	if (rv) {
   13809 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13810 			device_xname(sc->sc_dev)));
   13811 		flash_bank = 0;
   13812 	}
   13813 
   13814 	/*
   13815 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13816 	 * size
   13817 	 */
   13818 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13819 
   13820 	for (i = 0; i < words; i++) {
   13821 		/* The NVM part needs a byte offset, hence * 2 */
   13822 		act_offset = bank_offset + ((offset + i) * 2);
   13823 		/* but we must read dword aligned, so mask ... */
   13824 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13825 		if (rv) {
   13826 			aprint_error_dev(sc->sc_dev,
   13827 			    "%s: failed to read NVM\n", __func__);
   13828 			break;
   13829 		}
   13830 		/* ... and pick out low or high word */
   13831 		if ((act_offset & 0x2) == 0)
   13832 			data[i] = (uint16_t)(dword & 0xFFFF);
   13833 		else
   13834 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13835 	}
   13836 
   13837 	sc->nvm.release(sc);
   13838 	return rv;
   13839 }
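
/*
 * Editorial worked example: reading NVM word 3 from bank 0 gives
 * act_offset = 6.  The flash is read as a dword at 6 & ~0x3 = 4, and
 * since act_offset & 0x2 is set, the result is the high half:
 * data[i] = (dword >> 16) & 0xffff.
 */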
   13840 
   13841 /* iNVM */
   13842 
   13843 static int
   13844 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13845 {
    13846 	int32_t	 rv = -1;	/* Fail unless the word is found */
   13847 	uint32_t invm_dword;
   13848 	uint16_t i;
   13849 	uint8_t record_type, word_address;
   13850 
   13851 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13852 		device_xname(sc->sc_dev), __func__));
   13853 
   13854 	for (i = 0; i < INVM_SIZE; i++) {
   13855 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13856 		/* Get record type */
   13857 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13858 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13859 			break;
   13860 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13861 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13862 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13863 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13864 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13865 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13866 			if (word_address == address) {
   13867 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13868 				rv = 0;
   13869 				break;
   13870 			}
   13871 		}
   13872 	}
   13873 
   13874 	return rv;
   13875 }
   13876 
   13877 static int
   13878 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13879 {
   13880 	int rv = 0;
   13881 	int i;
   13882 
   13883 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13884 		device_xname(sc->sc_dev), __func__));
   13885 
   13886 	if (sc->nvm.acquire(sc) != 0)
   13887 		return -1;
   13888 
   13889 	for (i = 0; i < words; i++) {
   13890 		switch (offset + i) {
   13891 		case NVM_OFF_MACADDR:
   13892 		case NVM_OFF_MACADDR1:
   13893 		case NVM_OFF_MACADDR2:
   13894 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13895 			if (rv != 0) {
   13896 				data[i] = 0xffff;
   13897 				rv = -1;
   13898 			}
   13899 			break;
   13900 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13901 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13902 			if (rv != 0) {
   13903 				*data = INVM_DEFAULT_AL;
   13904 				rv = 0;
   13905 			}
   13906 			break;
   13907 		case NVM_OFF_CFG2:
   13908 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13909 			if (rv != 0) {
   13910 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13911 				rv = 0;
   13912 			}
   13913 			break;
   13914 		case NVM_OFF_CFG4:
   13915 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13916 			if (rv != 0) {
   13917 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13918 				rv = 0;
   13919 			}
   13920 			break;
   13921 		case NVM_OFF_LED_1_CFG:
   13922 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13923 			if (rv != 0) {
   13924 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13925 				rv = 0;
   13926 			}
   13927 			break;
   13928 		case NVM_OFF_LED_0_2_CFG:
   13929 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13930 			if (rv != 0) {
   13931 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13932 				rv = 0;
   13933 			}
   13934 			break;
   13935 		case NVM_OFF_ID_LED_SETTINGS:
   13936 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13937 			if (rv != 0) {
   13938 				*data = ID_LED_RESERVED_FFFF;
   13939 				rv = 0;
   13940 			}
   13941 			break;
   13942 		default:
   13943 			DPRINTF(sc, WM_DEBUG_NVM,
   13944 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13945 			*data = NVM_RESERVED_WORD;
   13946 			break;
   13947 		}
   13948 	}
   13949 
   13950 	sc->nvm.release(sc);
   13951 	return rv;
   13952 }
   13953 
    13954 /* Locking, NVM type detection, checksum validation, version and read */
   13955 
   13956 static int
   13957 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13958 {
   13959 	uint32_t eecd = 0;
   13960 
   13961 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13962 	    || sc->sc_type == WM_T_82583) {
   13963 		eecd = CSR_READ(sc, WMREG_EECD);
   13964 
   13965 		/* Isolate bits 15 & 16 */
   13966 		eecd = ((eecd >> 15) & 0x03);
   13967 
   13968 		/* If both bits are set, device is Flash type */
   13969 		if (eecd == 0x03)
   13970 			return 0;
   13971 	}
   13972 	return 1;
   13973 }
   13974 
   13975 static int
   13976 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13977 {
   13978 	uint32_t eec;
   13979 
   13980 	eec = CSR_READ(sc, WMREG_EEC);
   13981 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13982 		return 1;
   13983 
   13984 	return 0;
   13985 }
   13986 
   13987 /*
   13988  * wm_nvm_validate_checksum
   13989  *
   13990  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13991  */
   13992 static int
   13993 wm_nvm_validate_checksum(struct wm_softc *sc)
   13994 {
   13995 	uint16_t checksum;
   13996 	uint16_t eeprom_data;
   13997 #ifdef WM_DEBUG
   13998 	uint16_t csum_wordaddr, valid_checksum;
   13999 #endif
   14000 	int i;
   14001 
   14002 	checksum = 0;
   14003 
   14004 	/* Don't check for I211 */
   14005 	if (sc->sc_type == WM_T_I211)
   14006 		return 0;
   14007 
   14008 #ifdef WM_DEBUG
   14009 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14010 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14011 		csum_wordaddr = NVM_OFF_COMPAT;
   14012 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14013 	} else {
   14014 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14015 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14016 	}
   14017 
   14018 	/* Dump EEPROM image for debug */
   14019 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14020 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14021 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14022 		/* XXX PCH_SPT? */
   14023 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14024 		if ((eeprom_data & valid_checksum) == 0)
   14025 			DPRINTF(sc, WM_DEBUG_NVM,
   14026 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14027 				device_xname(sc->sc_dev), eeprom_data,
   14028 				    valid_checksum));
   14029 	}
   14030 
   14031 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14032 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14033 		for (i = 0; i < NVM_SIZE; i++) {
   14034 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14035 				printf("XXXX ");
   14036 			else
   14037 				printf("%04hx ", eeprom_data);
   14038 			if (i % 8 == 7)
   14039 				printf("\n");
   14040 		}
   14041 	}
   14042 
   14043 #endif /* WM_DEBUG */
   14044 
   14045 	for (i = 0; i < NVM_SIZE; i++) {
   14046 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14047 			return 1;
   14048 		checksum += eeprom_data;
   14049 	}
   14050 
   14051 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14052 #ifdef WM_DEBUG
   14053 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14054 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14055 #endif
   14056 	}
   14057 
   14058 	return 0;
   14059 }
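
/*
 * Editorial sketch (never compiled): the rule the loop above checks is
 * that all NVM_SIZE words, including the checksum word itself, sum to
 * NVM_CHECKSUM (0xbaba on e1000-class parts) modulo 2^16.  Assuming the
 * checksum occupies the final word (offset NVM_SIZE - 1, i.e. 0x3f), a
 * tool writing an image would compute that word like this;
 * wm_nvm_compute_checksum() is a hypothetical name.
 */
#if 0
static uint16_t
wm_nvm_compute_checksum(const uint16_t *image)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_SIZE - 1; i++)
		sum += image[i];
	return (uint16_t)(NVM_CHECKSUM - sum);
}
#endif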
   14060 
   14061 static void
   14062 wm_nvm_version_invm(struct wm_softc *sc)
   14063 {
   14064 	uint32_t dword;
   14065 
   14066 	/*
    14067 	 * Linux's code to decode the version is very strange, so we
    14068 	 * don't follow that algorithm and just use word 61 as the
    14069 	 * documentation describes.  Perhaps it's not perfect, though...
   14070 	 *
   14071 	 * Example:
   14072 	 *
   14073 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14074 	 */
   14075 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14076 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14077 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14078 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14079 }
   14080 
   14081 static void
   14082 wm_nvm_version(struct wm_softc *sc)
   14083 {
   14084 	uint16_t major, minor, build, patch;
   14085 	uint16_t uid0, uid1;
   14086 	uint16_t nvm_data;
   14087 	uint16_t off;
   14088 	bool check_version = false;
   14089 	bool check_optionrom = false;
   14090 	bool have_build = false;
   14091 	bool have_uid = true;
   14092 
   14093 	/*
   14094 	 * Version format:
   14095 	 *
   14096 	 * XYYZ
   14097 	 * X0YZ
   14098 	 * X0YY
   14099 	 *
   14100 	 * Example:
   14101 	 *
   14102 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14103 	 *	82571	0x50a6	5.10.6?
   14104 	 *	82572	0x506a	5.6.10?
   14105 	 *	82572EI	0x5069	5.6.9?
   14106 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14107 	 *		0x2013	2.1.3?
   14108 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14109 	 * ICH8+82567	0x0040	0.4.0?
   14110 	 * ICH9+82566	0x1040	1.4.0?
   14111 	 *ICH10+82567	0x0043	0.4.3?
   14112 	 *  PCH+82577	0x00c1	0.12.1?
   14113 	 * PCH2+82579	0x00d3	0.13.3?
   14114 	 *		0x00d4	0.13.4?
   14115 	 *  LPT+I218	0x0023	0.2.3?
   14116 	 *  SPT+I219	0x0084	0.8.4?
   14117 	 *  CNP+I219	0x0054	0.5.4?
   14118 	 */
   14119 
   14120 	/*
   14121 	 * XXX
   14122 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    14123 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14124 	 */
   14125 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14126 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14127 		have_uid = false;
   14128 
   14129 	switch (sc->sc_type) {
   14130 	case WM_T_82571:
   14131 	case WM_T_82572:
   14132 	case WM_T_82574:
   14133 	case WM_T_82583:
   14134 		check_version = true;
   14135 		check_optionrom = true;
   14136 		have_build = true;
   14137 		break;
   14138 	case WM_T_ICH8:
   14139 	case WM_T_ICH9:
   14140 	case WM_T_ICH10:
   14141 	case WM_T_PCH:
   14142 	case WM_T_PCH2:
   14143 	case WM_T_PCH_LPT:
   14144 	case WM_T_PCH_SPT:
   14145 	case WM_T_PCH_CNP:
   14146 		check_version = true;
   14147 		have_build = true;
   14148 		have_uid = false;
   14149 		break;
   14150 	case WM_T_82575:
   14151 	case WM_T_82576:
   14152 	case WM_T_82580:
   14153 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14154 			check_version = true;
   14155 		break;
   14156 	case WM_T_I211:
   14157 		wm_nvm_version_invm(sc);
   14158 		have_uid = false;
   14159 		goto printver;
   14160 	case WM_T_I210:
   14161 		if (!wm_nvm_flash_presence_i210(sc)) {
   14162 			wm_nvm_version_invm(sc);
   14163 			have_uid = false;
   14164 			goto printver;
   14165 		}
   14166 		/* FALLTHROUGH */
   14167 	case WM_T_I350:
   14168 	case WM_T_I354:
   14169 		check_version = true;
   14170 		check_optionrom = true;
   14171 		break;
   14172 	default:
   14173 		return;
   14174 	}
   14175 	if (check_version
   14176 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14177 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14178 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14179 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14180 			build = nvm_data & NVM_BUILD_MASK;
   14181 			have_build = true;
   14182 		} else
   14183 			minor = nvm_data & 0x00ff;
   14184 
    14185 		/* The minor number is BCD-coded; convert to decimal. */
   14186 		minor = (minor / 16) * 10 + (minor % 16);
   14187 		sc->sc_nvm_ver_major = major;
   14188 		sc->sc_nvm_ver_minor = minor;
   14189 
   14190 printver:
   14191 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14192 		    sc->sc_nvm_ver_minor);
   14193 		if (have_build) {
   14194 			sc->sc_nvm_ver_build = build;
   14195 			aprint_verbose(".%d", build);
   14196 		}
   14197 	}
   14198 
    14199 	/* Assume the Option ROM area is above NVM_SIZE */
   14200 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14201 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14202 		/* Option ROM Version */
   14203 		if ((off != 0x0000) && (off != 0xffff)) {
   14204 			int rv;
   14205 
   14206 			off += NVM_COMBO_VER_OFF;
   14207 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14208 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14209 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14210 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14211 				/* 16bits */
   14212 				major = uid0 >> 8;
   14213 				build = (uid0 << 8) | (uid1 >> 8);
   14214 				patch = uid1 & 0x00ff;
   14215 				aprint_verbose(", option ROM Version %d.%d.%d",
   14216 				    major, build, patch);
   14217 			}
   14218 		}
   14219 	}
   14220 
   14221 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14222 		aprint_verbose(", Image Unique ID %08x",
   14223 		    ((uint32_t)uid1 << 16) | uid0);
   14224 }
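
/*
 * Editorial worked example: an 82571 with version word 0x50a2 decodes
 * as major = 0x5, minor = 0x0a -> (10/16)*10 + 10%16 = 10, build = 2,
 * i.e. "5.10.2", matching the table at the top of this function.
 */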
   14225 
   14226 /*
   14227  * wm_nvm_read:
   14228  *
   14229  *	Read data from the serial EEPROM.
   14230  */
   14231 static int
   14232 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14233 {
   14234 	int rv;
   14235 
   14236 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14237 		device_xname(sc->sc_dev), __func__));
   14238 
   14239 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14240 		return -1;
   14241 
   14242 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14243 
   14244 	return rv;
   14245 }
   14246 
   14247 /*
   14248  * Hardware semaphores.
    14249  * Very complex...
   14250  */
   14251 
   14252 static int
   14253 wm_get_null(struct wm_softc *sc)
   14254 {
   14255 
   14256 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14257 		device_xname(sc->sc_dev), __func__));
   14258 	return 0;
   14259 }
   14260 
   14261 static void
   14262 wm_put_null(struct wm_softc *sc)
   14263 {
   14264 
   14265 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14266 		device_xname(sc->sc_dev), __func__));
   14267 	return;
   14268 }
   14269 
   14270 static int
   14271 wm_get_eecd(struct wm_softc *sc)
   14272 {
   14273 	uint32_t reg;
   14274 	int x;
   14275 
   14276 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14277 		device_xname(sc->sc_dev), __func__));
   14278 
   14279 	reg = CSR_READ(sc, WMREG_EECD);
   14280 
   14281 	/* Request EEPROM access. */
   14282 	reg |= EECD_EE_REQ;
   14283 	CSR_WRITE(sc, WMREG_EECD, reg);
   14284 
   14285 	/* ..and wait for it to be granted. */
   14286 	for (x = 0; x < 1000; x++) {
   14287 		reg = CSR_READ(sc, WMREG_EECD);
   14288 		if (reg & EECD_EE_GNT)
   14289 			break;
   14290 		delay(5);
   14291 	}
   14292 	if ((reg & EECD_EE_GNT) == 0) {
   14293 		aprint_error_dev(sc->sc_dev,
   14294 		    "could not acquire EEPROM GNT\n");
   14295 		reg &= ~EECD_EE_REQ;
   14296 		CSR_WRITE(sc, WMREG_EECD, reg);
   14297 		return -1;
   14298 	}
   14299 
   14300 	return 0;
   14301 }
   14302 
   14303 static void
   14304 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14305 {
   14306 
   14307 	*eecd |= EECD_SK;
   14308 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14309 	CSR_WRITE_FLUSH(sc);
   14310 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14311 		delay(1);
   14312 	else
   14313 		delay(50);
   14314 }
   14315 
   14316 static void
   14317 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14318 {
   14319 
   14320 	*eecd &= ~EECD_SK;
   14321 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14322 	CSR_WRITE_FLUSH(sc);
   14323 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14324 		delay(1);
   14325 	else
   14326 		delay(50);
   14327 }
   14328 
   14329 static void
   14330 wm_put_eecd(struct wm_softc *sc)
   14331 {
   14332 	uint32_t reg;
   14333 
   14334 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14335 		device_xname(sc->sc_dev), __func__));
   14336 
   14337 	/* Stop nvm */
   14338 	reg = CSR_READ(sc, WMREG_EECD);
   14339 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14340 		/* Pull CS high */
   14341 		reg |= EECD_CS;
   14342 		wm_nvm_eec_clock_lower(sc, &reg);
   14343 	} else {
   14344 		/* CS on Microwire is active-high */
   14345 		reg &= ~(EECD_CS | EECD_DI);
   14346 		CSR_WRITE(sc, WMREG_EECD, reg);
   14347 		wm_nvm_eec_clock_raise(sc, &reg);
   14348 		wm_nvm_eec_clock_lower(sc, &reg);
   14349 	}
   14350 
   14351 	reg = CSR_READ(sc, WMREG_EECD);
   14352 	reg &= ~EECD_EE_REQ;
   14353 	CSR_WRITE(sc, WMREG_EECD, reg);
   14354 
   14355 	return;
   14356 }
   14357 
   14358 /*
   14359  * Get hardware semaphore.
   14360  * Same as e1000_get_hw_semaphore_generic()
   14361  */
   14362 static int
   14363 wm_get_swsm_semaphore(struct wm_softc *sc)
   14364 {
   14365 	int32_t timeout;
   14366 	uint32_t swsm;
   14367 
   14368 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14369 		device_xname(sc->sc_dev), __func__));
   14370 	KASSERT(sc->sc_nvm_wordsize > 0);
   14371 
   14372 retry:
   14373 	/* Get the SW semaphore. */
   14374 	timeout = sc->sc_nvm_wordsize + 1;
   14375 	while (timeout) {
   14376 		swsm = CSR_READ(sc, WMREG_SWSM);
   14377 
   14378 		if ((swsm & SWSM_SMBI) == 0)
   14379 			break;
   14380 
   14381 		delay(50);
   14382 		timeout--;
   14383 	}
   14384 
   14385 	if (timeout == 0) {
   14386 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14387 			/*
   14388 			 * In rare circumstances, the SW semaphore may already
   14389 			 * be held unintentionally. Clear the semaphore once
   14390 			 * before giving up.
   14391 			 */
   14392 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14393 			wm_put_swsm_semaphore(sc);
   14394 			goto retry;
   14395 		}
   14396 		aprint_error_dev(sc->sc_dev,
   14397 		    "could not acquire SWSM SMBI\n");
   14398 		return 1;
   14399 	}
   14400 
   14401 	/* Get the FW semaphore. */
   14402 	timeout = sc->sc_nvm_wordsize + 1;
   14403 	while (timeout) {
   14404 		swsm = CSR_READ(sc, WMREG_SWSM);
   14405 		swsm |= SWSM_SWESMBI;
   14406 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14407 		/* If we managed to set the bit we got the semaphore. */
   14408 		swsm = CSR_READ(sc, WMREG_SWSM);
   14409 		if (swsm & SWSM_SWESMBI)
   14410 			break;
   14411 
   14412 		delay(50);
   14413 		timeout--;
   14414 	}
   14415 
   14416 	if (timeout == 0) {
   14417 		aprint_error_dev(sc->sc_dev,
   14418 		    "could not acquire SWSM SWESMBI\n");
   14419 		/* Release semaphores */
   14420 		wm_put_swsm_semaphore(sc);
   14421 		return 1;
   14422 	}
   14423 	return 0;
   14424 }
   14425 
   14426 /*
   14427  * Put hardware semaphore.
   14428  * Same as e1000_put_hw_semaphore_generic()
   14429  */
   14430 static void
   14431 wm_put_swsm_semaphore(struct wm_softc *sc)
   14432 {
   14433 	uint32_t swsm;
   14434 
   14435 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14436 		device_xname(sc->sc_dev), __func__));
   14437 
   14438 	swsm = CSR_READ(sc, WMREG_SWSM);
   14439 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14440 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14441 }
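
/*
 * Editorial usage sketch (never compiled): SWSM is a two-stage lock.
 * SMBI serializes software agents and SWESMBI then arbitrates with
 * firmware; callers always pair get and put around the critical section.
 */
#if 0
	if (wm_get_swsm_semaphore(sc) != 0)
		return -1;	/* Both stages must be won */
	/* ... touch the shared resource (e.g. NVM registers) ... */
	wm_put_swsm_semaphore(sc);
#endif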
   14442 
   14443 /*
   14444  * Get SW/FW semaphore.
   14445  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14446  */
   14447 static int
   14448 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14449 {
   14450 	uint32_t swfw_sync;
   14451 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14452 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14453 	int timeout;
   14454 
   14455 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14456 		device_xname(sc->sc_dev), __func__));
   14457 
   14458 	if (sc->sc_type == WM_T_80003)
   14459 		timeout = 50;
   14460 	else
   14461 		timeout = 200;
   14462 
   14463 	while (timeout) {
   14464 		if (wm_get_swsm_semaphore(sc)) {
   14465 			aprint_error_dev(sc->sc_dev,
   14466 			    "%s: failed to get semaphore\n",
   14467 			    __func__);
   14468 			return 1;
   14469 		}
   14470 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14471 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14472 			swfw_sync |= swmask;
   14473 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14474 			wm_put_swsm_semaphore(sc);
   14475 			return 0;
   14476 		}
   14477 		wm_put_swsm_semaphore(sc);
   14478 		delay(5000);
   14479 		timeout--;
   14480 	}
   14481 	device_printf(sc->sc_dev,
   14482 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14483 	    mask, swfw_sync);
   14484 	return 1;
   14485 }
   14486 
   14487 static void
   14488 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14489 {
   14490 	uint32_t swfw_sync;
   14491 
   14492 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14493 		device_xname(sc->sc_dev), __func__));
   14494 
   14495 	while (wm_get_swsm_semaphore(sc) != 0)
   14496 		continue;
   14497 
   14498 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14499 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14500 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14501 
   14502 	wm_put_swsm_semaphore(sc);
   14503 }
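
/*
 * Sketch of how a SW_FW_SYNC mask decomposes (illustrative; the shift
 * macros carry the actual bit positions):
 *
 *	swmask = mask << SWFW_SOFT_SHIFT;	("software owns it" bit)
 *	fwmask = mask << SWFW_FIRM_SHIFT;	("firmware owns it" bit)
 *
 * A resource is free only when both its SW and FW bits read as clear;
 * the driver then claims it by setting swmask alone, all while holding
 * the SWSM semaphore so the read-modify-write of SW_FW_SYNC is atomic.
 */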
   14504 
   14505 static int
   14506 wm_get_nvm_80003(struct wm_softc *sc)
   14507 {
   14508 	int rv;
   14509 
   14510 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14511 		device_xname(sc->sc_dev), __func__));
   14512 
   14513 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14514 		aprint_error_dev(sc->sc_dev,
   14515 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14516 		return rv;
   14517 	}
   14518 
   14519 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14520 	    && (rv = wm_get_eecd(sc)) != 0) {
   14521 		aprint_error_dev(sc->sc_dev,
   14522 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14523 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14524 		return rv;
   14525 	}
   14526 
   14527 	return 0;
   14528 }
   14529 
   14530 static void
   14531 wm_put_nvm_80003(struct wm_softc *sc)
   14532 {
   14533 
   14534 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14535 		device_xname(sc->sc_dev), __func__));
   14536 
   14537 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14538 		wm_put_eecd(sc);
   14539 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14540 }
   14541 
   14542 static int
   14543 wm_get_nvm_82571(struct wm_softc *sc)
   14544 {
   14545 	int rv;
   14546 
   14547 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14548 		device_xname(sc->sc_dev), __func__));
   14549 
   14550 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14551 		return rv;
   14552 
   14553 	switch (sc->sc_type) {
   14554 	case WM_T_82573:
   14555 		break;
   14556 	default:
   14557 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14558 			rv = wm_get_eecd(sc);
   14559 		break;
   14560 	}
   14561 
   14562 	if (rv != 0) {
   14563 		aprint_error_dev(sc->sc_dev,
   14564 		    "%s: failed to get semaphore\n",
   14565 		    __func__);
   14566 		wm_put_swsm_semaphore(sc);
   14567 	}
   14568 
   14569 	return rv;
   14570 }
   14571 
   14572 static void
   14573 wm_put_nvm_82571(struct wm_softc *sc)
   14574 {
   14575 
   14576 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14577 		device_xname(sc->sc_dev), __func__));
   14578 
   14579 	switch (sc->sc_type) {
   14580 	case WM_T_82573:
   14581 		break;
   14582 	default:
   14583 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14584 			wm_put_eecd(sc);
   14585 		break;
   14586 	}
   14587 
   14588 	wm_put_swsm_semaphore(sc);
   14589 }
   14590 
   14591 static int
   14592 wm_get_phy_82575(struct wm_softc *sc)
   14593 {
   14594 
   14595 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14596 		device_xname(sc->sc_dev), __func__));
   14597 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14598 }
   14599 
   14600 static void
   14601 wm_put_phy_82575(struct wm_softc *sc)
   14602 {
   14603 
   14604 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14605 		device_xname(sc->sc_dev), __func__));
   14606 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14607 }
   14608 
   14609 static int
   14610 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14611 {
   14612 	uint32_t ext_ctrl;
    14613 	int timeout;
   14614 
   14615 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14616 		device_xname(sc->sc_dev), __func__));
   14617 
   14618 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14619 	for (timeout = 0; timeout < 200; timeout++) {
   14620 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14621 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14622 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14623 
   14624 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14625 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14626 			return 0;
   14627 		delay(5000);
   14628 	}
   14629 	device_printf(sc->sc_dev,
   14630 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14631 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14632 	return 1;
   14633 }
   14634 
   14635 static void
   14636 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14637 {
   14638 	uint32_t ext_ctrl;
   14639 
   14640 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14641 		device_xname(sc->sc_dev), __func__));
   14642 
   14643 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14644 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14645 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14646 
   14647 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14648 }
   14649 
   14650 static int
   14651 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14652 {
   14653 	uint32_t ext_ctrl;
   14654 	int timeout;
   14655 
   14656 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14657 		device_xname(sc->sc_dev), __func__));
   14658 	mutex_enter(sc->sc_ich_phymtx);
   14659 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14660 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14661 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14662 			break;
   14663 		delay(1000);
   14664 	}
   14665 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14666 		device_printf(sc->sc_dev,
   14667 		    "SW has already locked the resource\n");
   14668 		goto out;
   14669 	}
   14670 
   14671 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14672 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14673 	for (timeout = 0; timeout < 1000; timeout++) {
   14674 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14675 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14676 			break;
   14677 		delay(1000);
   14678 	}
   14679 	if (timeout >= 1000) {
   14680 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14681 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14682 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14683 		goto out;
   14684 	}
   14685 	return 0;
   14686 
   14687 out:
   14688 	mutex_exit(sc->sc_ich_phymtx);
   14689 	return 1;
   14690 }
   14691 
   14692 static void
   14693 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14694 {
   14695 	uint32_t ext_ctrl;
   14696 
   14697 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14698 		device_xname(sc->sc_dev), __func__));
   14699 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14700 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14701 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14702 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14703 	} else {
   14704 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14705 	}
   14706 
   14707 	mutex_exit(sc->sc_ich_phymtx);
   14708 }
   14709 
   14710 static int
   14711 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14712 {
   14713 
   14714 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14715 		device_xname(sc->sc_dev), __func__));
   14716 	mutex_enter(sc->sc_ich_nvmmtx);
   14717 
   14718 	return 0;
   14719 }
   14720 
   14721 static void
   14722 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14723 {
   14724 
   14725 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14726 		device_xname(sc->sc_dev), __func__));
   14727 	mutex_exit(sc->sc_ich_nvmmtx);
   14728 }
   14729 
   14730 static int
   14731 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14732 {
   14733 	int i = 0;
   14734 	uint32_t reg;
   14735 
   14736 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14737 		device_xname(sc->sc_dev), __func__));
   14738 
   14739 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14740 	do {
   14741 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14742 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14743 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14744 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14745 			break;
   14746 		delay(2*1000);
   14747 		i++;
   14748 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14749 
   14750 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14751 		wm_put_hw_semaphore_82573(sc);
   14752 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14753 		    device_xname(sc->sc_dev));
   14754 		return -1;
   14755 	}
   14756 
   14757 	return 0;
   14758 }
   14759 
   14760 static void
   14761 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14762 {
   14763 	uint32_t reg;
   14764 
   14765 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14766 		device_xname(sc->sc_dev), __func__));
   14767 
   14768 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14769 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14770 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14771 }
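
/*
 * Note (descriptive): wm_get_hw_semaphore_82573() arbitrates on the
 * same EXTCNFCTR MDIO software ownership bit as the swfwhw/swflag
 * helpers above; the variants differ mainly in their timeouts and in
 * whether an ICH PHY mutex additionally serializes the driver itself.
 */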
   14772 
   14773 /*
   14774  * Management mode and power management related subroutines.
   14775  * BMC, AMT, suspend/resume and EEE.
   14776  */
   14777 
   14778 #ifdef WM_WOL
   14779 static int
   14780 wm_check_mng_mode(struct wm_softc *sc)
   14781 {
   14782 	int rv;
   14783 
   14784 	switch (sc->sc_type) {
   14785 	case WM_T_ICH8:
   14786 	case WM_T_ICH9:
   14787 	case WM_T_ICH10:
   14788 	case WM_T_PCH:
   14789 	case WM_T_PCH2:
   14790 	case WM_T_PCH_LPT:
   14791 	case WM_T_PCH_SPT:
   14792 	case WM_T_PCH_CNP:
   14793 		rv = wm_check_mng_mode_ich8lan(sc);
   14794 		break;
   14795 	case WM_T_82574:
   14796 	case WM_T_82583:
   14797 		rv = wm_check_mng_mode_82574(sc);
   14798 		break;
   14799 	case WM_T_82571:
   14800 	case WM_T_82572:
   14801 	case WM_T_82573:
   14802 	case WM_T_80003:
   14803 		rv = wm_check_mng_mode_generic(sc);
   14804 		break;
   14805 	default:
    14806 		/* Nothing to do */
   14807 		rv = 0;
   14808 		break;
   14809 	}
   14810 
   14811 	return rv;
   14812 }
   14813 
   14814 static int
   14815 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14816 {
   14817 	uint32_t fwsm;
   14818 
   14819 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14820 
   14821 	if (((fwsm & FWSM_FW_VALID) != 0)
   14822 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14823 		return 1;
   14824 
   14825 	return 0;
   14826 }
   14827 
   14828 static int
   14829 wm_check_mng_mode_82574(struct wm_softc *sc)
   14830 {
   14831 	uint16_t data;
   14832 
   14833 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14834 
   14835 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14836 		return 1;
   14837 
   14838 	return 0;
   14839 }
   14840 
   14841 static int
   14842 wm_check_mng_mode_generic(struct wm_softc *sc)
   14843 {
   14844 	uint32_t fwsm;
   14845 
   14846 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14847 
   14848 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14849 		return 1;
   14850 
   14851 	return 0;
   14852 }
   14853 #endif /* WM_WOL */
   14854 
   14855 static int
   14856 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14857 {
   14858 	uint32_t manc, fwsm, factps;
   14859 
   14860 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14861 		return 0;
   14862 
   14863 	manc = CSR_READ(sc, WMREG_MANC);
   14864 
   14865 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14866 		device_xname(sc->sc_dev), manc));
   14867 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14868 		return 0;
   14869 
   14870 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14871 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14872 		factps = CSR_READ(sc, WMREG_FACTPS);
   14873 		if (((factps & FACTPS_MNGCG) == 0)
   14874 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14875 			return 1;
   14876 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14877 		uint16_t data;
   14878 
   14879 		factps = CSR_READ(sc, WMREG_FACTPS);
   14880 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14881 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14882 			device_xname(sc->sc_dev), factps, data));
   14883 		if (((factps & FACTPS_MNGCG) == 0)
   14884 		    && ((data & NVM_CFG2_MNGM_MASK)
   14885 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14886 			return 1;
   14887 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14888 	    && ((manc & MANC_ASF_EN) == 0))
   14889 		return 1;
   14890 
   14891 	return 0;
   14892 }
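
/*
 * Reading aid for the pass-through checks above (not a normative
 * table):
 *
 *	no ASF firmware or RECV_TCO_EN clear -> no pass-through
 *	ARC subsystem valid	-> FACTPS_MNGCG clear and
 *				   FWSM mode == MNG_ICH_IAMT_MODE
 *	82574/82583		-> FACTPS_MNGCG clear and
 *				   NVM CFG2 MNGM field == pass-through
 *	otherwise		-> MANC_SMBUS_EN set and MANC_ASF_EN clear
 */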
   14893 
   14894 static bool
   14895 wm_phy_resetisblocked(struct wm_softc *sc)
   14896 {
   14897 	bool blocked = false;
   14898 	uint32_t reg;
   14899 	int i = 0;
   14900 
   14901 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14902 		device_xname(sc->sc_dev), __func__));
   14903 
   14904 	switch (sc->sc_type) {
   14905 	case WM_T_ICH8:
   14906 	case WM_T_ICH9:
   14907 	case WM_T_ICH10:
   14908 	case WM_T_PCH:
   14909 	case WM_T_PCH2:
   14910 	case WM_T_PCH_LPT:
   14911 	case WM_T_PCH_SPT:
   14912 	case WM_T_PCH_CNP:
   14913 		do {
   14914 			reg = CSR_READ(sc, WMREG_FWSM);
   14915 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14916 				blocked = true;
   14917 				delay(10*1000);
   14918 				continue;
   14919 			}
   14920 			blocked = false;
   14921 		} while (blocked && (i++ < 30));
   14922 		return blocked;
   14924 	case WM_T_82571:
   14925 	case WM_T_82572:
   14926 	case WM_T_82573:
   14927 	case WM_T_82574:
   14928 	case WM_T_82583:
   14929 	case WM_T_80003:
   14930 		reg = CSR_READ(sc, WMREG_MANC);
   14931 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14932 			return true;
   14933 		else
   14934 			return false;
   14936 	default:
   14937 		/* No problem */
   14938 		break;
   14939 	}
   14940 
   14941 	return false;
   14942 }
   14943 
   14944 static void
   14945 wm_get_hw_control(struct wm_softc *sc)
   14946 {
   14947 	uint32_t reg;
   14948 
   14949 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14950 		device_xname(sc->sc_dev), __func__));
   14951 
   14952 	if (sc->sc_type == WM_T_82573) {
   14953 		reg = CSR_READ(sc, WMREG_SWSM);
   14954 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14955 	} else if (sc->sc_type >= WM_T_82571) {
   14956 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14957 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14958 	}
   14959 }
   14960 
   14961 static void
   14962 wm_release_hw_control(struct wm_softc *sc)
   14963 {
   14964 	uint32_t reg;
   14965 
   14966 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14967 		device_xname(sc->sc_dev), __func__));
   14968 
   14969 	if (sc->sc_type == WM_T_82573) {
   14970 		reg = CSR_READ(sc, WMREG_SWSM);
   14971 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14972 	} else if (sc->sc_type >= WM_T_82571) {
   14973 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14974 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14975 	}
   14976 }
   14977 
   14978 static void
   14979 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14980 {
   14981 	uint32_t reg;
   14982 
   14983 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14984 		device_xname(sc->sc_dev), __func__));
   14985 
   14986 	if (sc->sc_type < WM_T_PCH2)
   14987 		return;
   14988 
   14989 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14990 
   14991 	if (gate)
   14992 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14993 	else
   14994 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14995 
   14996 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14997 }
   14998 
   14999 static int
   15000 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15001 {
   15002 	uint32_t fwsm, reg;
   15003 	int rv = 0;
   15004 
   15005 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15006 		device_xname(sc->sc_dev), __func__));
   15007 
   15008 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15009 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15010 
   15011 	/* Disable ULP */
   15012 	wm_ulp_disable(sc);
   15013 
   15014 	/* Acquire PHY semaphore */
   15015 	rv = sc->phy.acquire(sc);
   15016 	if (rv != 0) {
   15017 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15018 		device_xname(sc->sc_dev), __func__));
   15019 		return -1;
   15020 	}
   15021 
   15022 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15023 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15024 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15025 	 */
   15026 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15027 	switch (sc->sc_type) {
   15028 	case WM_T_PCH_LPT:
   15029 	case WM_T_PCH_SPT:
   15030 	case WM_T_PCH_CNP:
   15031 		if (wm_phy_is_accessible_pchlan(sc))
   15032 			break;
   15033 
   15034 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15035 		 * forcing MAC to SMBus mode first.
   15036 		 */
   15037 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15038 		reg |= CTRL_EXT_FORCE_SMBUS;
   15039 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15040 #if 0
   15041 		/* XXX Isn't this required??? */
   15042 		CSR_WRITE_FLUSH(sc);
   15043 #endif
   15044 		/* Wait 50 milliseconds for MAC to finish any retries
   15045 		 * that it might be trying to perform from previous
   15046 		 * attempts to acknowledge any phy read requests.
   15047 		 */
   15048 		delay(50 * 1000);
   15049 		/* FALLTHROUGH */
   15050 	case WM_T_PCH2:
   15051 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15052 			break;
   15053 		/* FALLTHROUGH */
   15054 	case WM_T_PCH:
   15055 		if (sc->sc_type == WM_T_PCH)
   15056 			if ((fwsm & FWSM_FW_VALID) != 0)
   15057 				break;
   15058 
   15059 		if (wm_phy_resetisblocked(sc) == true) {
   15060 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15061 			break;
   15062 		}
   15063 
   15064 		/* Toggle LANPHYPC Value bit */
   15065 		wm_toggle_lanphypc_pch_lpt(sc);
   15066 
   15067 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15068 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15069 				break;
   15070 
   15071 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15072 			 * so ensure that the MAC is also out of SMBus mode
   15073 			 */
   15074 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15075 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15076 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15077 
   15078 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15079 				break;
   15080 			rv = -1;
   15081 		}
   15082 		break;
   15083 	default:
   15084 		break;
   15085 	}
   15086 
   15087 	/* Release semaphore */
   15088 	sc->phy.release(sc);
   15089 
   15090 	if (rv == 0) {
   15091 		/* Check to see if able to reset PHY.  Print error if not */
   15092 		if (wm_phy_resetisblocked(sc)) {
   15093 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15094 			goto out;
   15095 		}
   15096 
   15097 		/* Reset the PHY before any access to it.  Doing so, ensures
   15098 		 * that the PHY is in a known good state before we read/write
   15099 		 * PHY registers.  The generic reset is sufficient here,
   15100 		 * because we haven't determined the PHY type yet.
   15101 		 */
    15102 		if ((rv = wm_reset_phy(sc)) != 0)
    15103 			goto out;
   15104 
   15105 		/* On a successful reset, possibly need to wait for the PHY
   15106 		 * to quiesce to an accessible state before returning control
   15107 		 * to the calling function.  If the PHY does not quiesce, then
   15108 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    15109 		 * the PHY is in.
   15110 		 */
   15111 		if (wm_phy_resetisblocked(sc))
   15112 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15113 	}
   15114 
   15115 out:
   15116 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15117 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15118 		delay(10*1000);
   15119 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15120 	}
   15121 
    15122 	return rv;
   15123 }
   15124 
   15125 static void
   15126 wm_init_manageability(struct wm_softc *sc)
   15127 {
   15128 
   15129 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15130 		device_xname(sc->sc_dev), __func__));
   15131 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15132 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15133 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15134 
   15135 		/* Disable hardware interception of ARP */
   15136 		manc &= ~MANC_ARP_EN;
   15137 
   15138 		/* Enable receiving management packets to the host */
   15139 		if (sc->sc_type >= WM_T_82571) {
   15140 			manc |= MANC_EN_MNG2HOST;
   15141 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15142 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15143 		}
   15144 
   15145 		CSR_WRITE(sc, WMREG_MANC, manc);
   15146 	}
   15147 }
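
/*
 * Descriptive note on the above: with MANC_EN_MNG2HOST set, management
 * packets matching the MANC2H filters are delivered to the host as
 * well as to the management controller; the two bits written above
 * whitelist the manageability UDP ports (623 and 624, per the macro
 * names).
 */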
   15148 
   15149 static void
   15150 wm_release_manageability(struct wm_softc *sc)
   15151 {
   15152 
   15153 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15154 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15155 
   15156 		manc |= MANC_ARP_EN;
   15157 		if (sc->sc_type >= WM_T_82571)
   15158 			manc &= ~MANC_EN_MNG2HOST;
   15159 
   15160 		CSR_WRITE(sc, WMREG_MANC, manc);
   15161 	}
   15162 }
   15163 
   15164 static void
   15165 wm_get_wakeup(struct wm_softc *sc)
   15166 {
   15167 
   15168 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15169 	switch (sc->sc_type) {
   15170 	case WM_T_82573:
   15171 	case WM_T_82583:
   15172 		sc->sc_flags |= WM_F_HAS_AMT;
   15173 		/* FALLTHROUGH */
   15174 	case WM_T_80003:
   15175 	case WM_T_82575:
   15176 	case WM_T_82576:
   15177 	case WM_T_82580:
   15178 	case WM_T_I350:
   15179 	case WM_T_I354:
   15180 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15181 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15182 		/* FALLTHROUGH */
   15183 	case WM_T_82541:
   15184 	case WM_T_82541_2:
   15185 	case WM_T_82547:
   15186 	case WM_T_82547_2:
   15187 	case WM_T_82571:
   15188 	case WM_T_82572:
   15189 	case WM_T_82574:
   15190 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15191 		break;
   15192 	case WM_T_ICH8:
   15193 	case WM_T_ICH9:
   15194 	case WM_T_ICH10:
   15195 	case WM_T_PCH:
   15196 	case WM_T_PCH2:
   15197 	case WM_T_PCH_LPT:
   15198 	case WM_T_PCH_SPT:
   15199 	case WM_T_PCH_CNP:
   15200 		sc->sc_flags |= WM_F_HAS_AMT;
   15201 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15202 		break;
   15203 	default:
   15204 		break;
   15205 	}
   15206 
   15207 	/* 1: HAS_MANAGE */
   15208 	if (wm_enable_mng_pass_thru(sc) != 0)
   15209 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15210 
   15211 	/*
    15212 	 * Note that the WOL flag is set after the EEPROM-related reset
    15213 	 * has been done.
   15214 	 */
   15215 }
   15216 
   15217 /*
   15218  * Unconfigure Ultra Low Power mode.
   15219  * Only for I217 and newer (see below).
   15220  */
   15221 static int
   15222 wm_ulp_disable(struct wm_softc *sc)
   15223 {
   15224 	uint32_t reg;
   15225 	uint16_t phyreg;
   15226 	int i = 0, rv = 0;
   15227 
   15228 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15229 		device_xname(sc->sc_dev), __func__));
   15230 	/* Exclude old devices */
   15231 	if ((sc->sc_type < WM_T_PCH_LPT)
   15232 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15233 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15234 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15235 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15236 		return 0;
   15237 
   15238 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15239 		/* Request ME un-configure ULP mode in the PHY */
   15240 		reg = CSR_READ(sc, WMREG_H2ME);
   15241 		reg &= ~H2ME_ULP;
   15242 		reg |= H2ME_ENFORCE_SETTINGS;
   15243 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15244 
   15245 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15246 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15247 			if (i++ == 30) {
   15248 				device_printf(sc->sc_dev, "%s timed out\n",
   15249 				    __func__);
   15250 				return -1;
   15251 			}
   15252 			delay(10 * 1000);
   15253 		}
   15254 		reg = CSR_READ(sc, WMREG_H2ME);
   15255 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15256 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15257 
   15258 		return 0;
   15259 	}
   15260 
   15261 	/* Acquire semaphore */
   15262 	rv = sc->phy.acquire(sc);
   15263 	if (rv != 0) {
   15264 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15265 		device_xname(sc->sc_dev), __func__));
   15266 		return -1;
   15267 	}
   15268 
   15269 	/* Toggle LANPHYPC */
   15270 	wm_toggle_lanphypc_pch_lpt(sc);
   15271 
   15272 	/* Unforce SMBus mode in PHY */
   15273 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15274 	if (rv != 0) {
   15275 		uint32_t reg2;
   15276 
   15277 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15278 			__func__);
   15279 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15280 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15281 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15282 		delay(50 * 1000);
   15283 
   15284 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15285 		    &phyreg);
   15286 		if (rv != 0)
   15287 			goto release;
   15288 	}
   15289 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15290 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15291 
   15292 	/* Unforce SMBus mode in MAC */
   15293 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15294 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15295 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15296 
   15297 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15298 	if (rv != 0)
   15299 		goto release;
   15300 	phyreg |= HV_PM_CTRL_K1_ENA;
   15301 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15302 
   15303 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15304 		&phyreg);
   15305 	if (rv != 0)
   15306 		goto release;
   15307 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15308 	    | I218_ULP_CONFIG1_STICKY_ULP
   15309 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15310 	    | I218_ULP_CONFIG1_WOL_HOST
   15311 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15312 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15313 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15314 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15315 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15316 	phyreg |= I218_ULP_CONFIG1_START;
   15317 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15318 
   15319 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15320 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15321 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15322 
   15323 release:
   15324 	/* Release semaphore */
   15325 	sc->phy.release(sc);
   15326 	wm_gmii_reset(sc);
   15327 	delay(50 * 1000);
   15328 
   15329 	return rv;
   15330 }
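
/*
 * In summary (descriptive only), ULP exit takes one of two paths:
 * with valid ME firmware the driver merely asks ME via H2ME and polls
 * for FWSM_ULP_CFG_DONE to clear; without it the driver toggles
 * LANPHYPC, unforces SMBus mode in both PHY (CV_SMB_CTRL) and MAC
 * (CTRL_EXT), re-enables K1, clears the sticky I218_ULP_CONFIG1 bits
 * and restarts ULP configuration (I218_ULP_CONFIG1_START), and
 * finally resets the PHY.
 */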
   15331 
   15332 /* WOL in the newer chipset interfaces (pchlan) */
   15333 static int
   15334 wm_enable_phy_wakeup(struct wm_softc *sc)
   15335 {
   15336 	device_t dev = sc->sc_dev;
   15337 	uint32_t mreg, moff;
   15338 	uint16_t wuce, wuc, wufc, preg;
   15339 	int i, rv;
   15340 
   15341 	KASSERT(sc->sc_type >= WM_T_PCH);
   15342 
   15343 	/* Copy MAC RARs to PHY RARs */
   15344 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15345 
   15346 	/* Activate PHY wakeup */
   15347 	rv = sc->phy.acquire(sc);
   15348 	if (rv != 0) {
   15349 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15350 		    __func__);
   15351 		return rv;
   15352 	}
   15353 
   15354 	/*
   15355 	 * Enable access to PHY wakeup registers.
   15356 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15357 	 */
   15358 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15359 	if (rv != 0) {
   15360 		device_printf(dev,
   15361 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15362 		goto release;
   15363 	}
   15364 
   15365 	/* Copy MAC MTA to PHY MTA */
   15366 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15367 		uint16_t lo, hi;
   15368 
   15369 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15370 		lo = (uint16_t)(mreg & 0xffff);
   15371 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15372 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15373 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15374 	}
   15375 
   15376 	/* Configure PHY Rx Control register */
   15377 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15378 	mreg = CSR_READ(sc, WMREG_RCTL);
   15379 	if (mreg & RCTL_UPE)
   15380 		preg |= BM_RCTL_UPE;
   15381 	if (mreg & RCTL_MPE)
   15382 		preg |= BM_RCTL_MPE;
   15383 	preg &= ~(BM_RCTL_MO_MASK);
   15384 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15385 	if (moff != 0)
   15386 		preg |= moff << BM_RCTL_MO_SHIFT;
   15387 	if (mreg & RCTL_BAM)
   15388 		preg |= BM_RCTL_BAM;
   15389 	if (mreg & RCTL_PMCF)
   15390 		preg |= BM_RCTL_PMCF;
   15391 	mreg = CSR_READ(sc, WMREG_CTRL);
   15392 	if (mreg & CTRL_RFCE)
   15393 		preg |= BM_RCTL_RFCE;
   15394 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15395 
   15396 	wuc = WUC_APME | WUC_PME_EN;
   15397 	wufc = WUFC_MAG;
   15398 	/* Enable PHY wakeup in MAC register */
   15399 	CSR_WRITE(sc, WMREG_WUC,
   15400 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15401 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15402 
   15403 	/* Configure and enable PHY wakeup in PHY registers */
   15404 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15405 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15406 
   15407 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15408 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15409 
   15410 release:
   15411 	sc->phy.release(sc);
   15412 
    15413 	return rv;
   15414 }
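
/*
 * Access-pattern sketch for the BM wakeup registers used above (an
 * illustration mirroring the calls in wm_enable_phy_wakeup(), not
 * additional driver code):
 *
 *	uint16_t wuce, val;
 *	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);	  (open page)
 *	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &val, 1, true); (read)
 *	val |= WUFC_MAG;
 *	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &val, 0, true); (write)
 *	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);  (restore)
 *
 * i.e. the fourth argument selects read (non-zero) vs. write (zero),
 * and the whole sequence must run with the PHY semaphore held.
 */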
   15415 
   15416 /* Power down workaround on D3 */
   15417 static void
   15418 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15419 {
   15420 	uint32_t reg;
   15421 	uint16_t phyreg;
   15422 	int i;
   15423 
   15424 	for (i = 0; i < 2; i++) {
   15425 		/* Disable link */
   15426 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15427 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15428 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15429 
   15430 		/*
   15431 		 * Call gig speed drop workaround on Gig disable before
   15432 		 * accessing any PHY registers
   15433 		 */
   15434 		if (sc->sc_type == WM_T_ICH8)
   15435 			wm_gig_downshift_workaround_ich8lan(sc);
   15436 
   15437 		/* Write VR power-down enable */
   15438 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15439 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15440 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15441 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15442 
   15443 		/* Read it back and test */
   15444 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15445 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15446 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15447 			break;
   15448 
   15449 		/* Issue PHY reset and repeat at most one more time */
   15450 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15451 	}
   15452 }
   15453 
   15454 /*
   15455  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15456  *  @sc: pointer to the HW structure
   15457  *
   15458  *  During S0 to Sx transition, it is possible the link remains at gig
   15459  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15460  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15461  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15462  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15463  *  needs to be written.
   15464  *  Parts that support (and are linked to a partner which support) EEE in
   15465  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15466  *  than 10Mbps w/o EEE.
   15467  */
   15468 static void
   15469 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15470 {
   15471 	device_t dev = sc->sc_dev;
   15472 	struct ethercom *ec = &sc->sc_ethercom;
   15473 	uint32_t phy_ctrl;
   15474 	int rv;
   15475 
   15476 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15477 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15478 
   15479 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15480 
   15481 	if (sc->sc_phytype == WMPHY_I217) {
   15482 		uint16_t devid = sc->sc_pcidevid;
   15483 
   15484 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15485 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15486 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15487 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15488 		    (sc->sc_type >= WM_T_PCH_SPT))
   15489 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15490 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15491 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15492 
   15493 		if (sc->phy.acquire(sc) != 0)
   15494 			goto out;
   15495 
   15496 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15497 			uint16_t eee_advert;
   15498 
   15499 			rv = wm_read_emi_reg_locked(dev,
   15500 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15501 			if (rv)
   15502 				goto release;
   15503 
   15504 			/*
   15505 			 * Disable LPLU if both link partners support 100BaseT
   15506 			 * EEE and 100Full is advertised on both ends of the
   15507 			 * link, and enable Auto Enable LPI since there will
   15508 			 * be no driver to enable LPI while in Sx.
   15509 			 */
   15510 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15511 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15512 				uint16_t anar, phy_reg;
   15513 
   15514 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15515 				    &anar);
   15516 				if (anar & ANAR_TX_FD) {
   15517 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15518 					    PHY_CTRL_NOND0A_LPLU);
   15519 
   15520 					/* Set Auto Enable LPI after link up */
   15521 					sc->phy.readreg_locked(dev, 2,
   15522 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15523 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15524 					sc->phy.writereg_locked(dev, 2,
   15525 					    I217_LPI_GPIO_CTRL, phy_reg);
   15526 				}
   15527 			}
   15528 		}
   15529 
   15530 		/*
   15531 		 * For i217 Intel Rapid Start Technology support,
   15532 		 * when the system is going into Sx and no manageability engine
   15533 		 * is present, the driver must configure proxy to reset only on
   15534 		 * power good.	LPI (Low Power Idle) state must also reset only
   15535 		 * on power good, as well as the MTA (Multicast table array).
   15536 		 * The SMBus release must also be disabled on LCD reset.
   15537 		 */
   15538 
   15539 		/*
   15540 		 * Enable MTA to reset for Intel Rapid Start Technology
   15541 		 * Support
   15542 		 */
   15543 
   15544 release:
   15545 		sc->phy.release(sc);
   15546 	}
   15547 out:
   15548 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15549 
   15550 	if (sc->sc_type == WM_T_ICH8)
   15551 		wm_gig_downshift_workaround_ich8lan(sc);
   15552 
   15553 	if (sc->sc_type >= WM_T_PCH) {
   15554 		wm_oem_bits_config_ich8lan(sc, false);
   15555 
   15556 		/* Reset PHY to activate OEM bits on 82577/8 */
   15557 		if (sc->sc_type == WM_T_PCH)
   15558 			wm_reset_phy(sc);
   15559 
   15560 		if (sc->phy.acquire(sc) != 0)
   15561 			return;
   15562 		wm_write_smbus_addr(sc);
   15563 		sc->phy.release(sc);
   15564 	}
   15565 }
   15566 
   15567 /*
   15568  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15569  *  @sc: pointer to the HW structure
   15570  *
   15571  *  During Sx to S0 transitions on non-managed devices or managed devices
   15572  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15573  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15574  *  the PHY.
   15575  *  On i217, setup Intel Rapid Start Technology.
   15576  */
   15577 static int
   15578 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15579 {
   15580 	device_t dev = sc->sc_dev;
   15581 	int rv;
   15582 
   15583 	if (sc->sc_type < WM_T_PCH2)
   15584 		return 0;
   15585 
   15586 	rv = wm_init_phy_workarounds_pchlan(sc);
   15587 	if (rv != 0)
   15588 		return -1;
   15589 
   15590 	/* For i217 Intel Rapid Start Technology support when the system
   15591 	 * is transitioning from Sx and no manageability engine is present
   15592 	 * configure SMBus to restore on reset, disable proxy, and enable
   15593 	 * the reset on MTA (Multicast table array).
   15594 	 */
   15595 	if (sc->sc_phytype == WMPHY_I217) {
   15596 		uint16_t phy_reg;
   15597 
   15598 		if (sc->phy.acquire(sc) != 0)
   15599 			return -1;
   15600 
   15601 		/* Clear Auto Enable LPI after link up */
   15602 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15603 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15604 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15605 
   15606 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15607 			/* Restore clear on SMB if no manageability engine
   15608 			 * is present
   15609 			 */
   15610 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15611 			    &phy_reg);
   15612 			if (rv != 0)
   15613 				goto release;
   15614 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15615 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15616 
   15617 			/* Disable Proxy */
   15618 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15619 		}
   15620 		/* Enable reset on MTA */
    15621 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15622 		if (rv != 0)
   15623 			goto release;
   15624 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15625 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15626 
   15627 release:
   15628 		sc->phy.release(sc);
   15629 		return rv;
   15630 	}
   15631 
   15632 	return 0;
   15633 }
   15634 
   15635 static void
   15636 wm_enable_wakeup(struct wm_softc *sc)
   15637 {
    15638 	uint32_t reg;
    15639 	pcireg_t pmode;
    15640 	int pmreg, rv = 0;
   15641 
   15642 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15643 		device_xname(sc->sc_dev), __func__));
   15644 
   15645 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15646 	    &pmreg, NULL) == 0)
   15647 		return;
   15648 
   15649 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15650 		goto pme;
   15651 
   15652 	/* Advertise the wakeup capability */
   15653 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15654 	    | CTRL_SWDPIN(3));
   15655 
   15656 	/* Keep the laser running on fiber adapters */
   15657 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15658 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15659 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15660 		reg |= CTRL_EXT_SWDPIN(3);
   15661 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15662 	}
   15663 
   15664 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15665 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15666 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15667 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15668 		wm_suspend_workarounds_ich8lan(sc);
   15669 
   15670 #if 0	/* For the multicast packet */
   15671 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15672 	reg |= WUFC_MC;
   15673 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15674 #endif
   15675 
   15676 	if (sc->sc_type >= WM_T_PCH) {
   15677 		rv = wm_enable_phy_wakeup(sc);
   15678 		if (rv != 0)
   15679 			goto pme;
   15680 	} else {
   15681 		/* Enable wakeup by the MAC */
   15682 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15683 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15684 	}
   15685 
   15686 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15687 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15688 		|| (sc->sc_type == WM_T_PCH2))
   15689 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15690 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15691 
   15692 pme:
   15693 	/* Request PME */
   15694 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15695 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15696 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15697 		/* For WOL */
   15698 		pmode |= PCI_PMCSR_PME_EN;
   15699 	} else {
   15700 		/* Disable WOL */
   15701 		pmode &= ~PCI_PMCSR_PME_EN;
   15702 	}
   15703 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15704 }
   15705 
   15706 /* Disable ASPM L0s and/or L1 for workaround */
   15707 static void
   15708 wm_disable_aspm(struct wm_softc *sc)
   15709 {
   15710 	pcireg_t reg, mask = 0;
    15711 	const char *str = "";
   15712 
   15713 	/*
    15714 	 * Only for PCIe devices which have the PCIe capability structure
    15715 	 * in their PCI config space.
   15716 	 */
   15717 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15718 		return;
   15719 
   15720 	switch (sc->sc_type) {
   15721 	case WM_T_82571:
   15722 	case WM_T_82572:
   15723 		/*
   15724 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15725 		 * State Power management L1 State (ASPM L1).
   15726 		 */
   15727 		mask = PCIE_LCSR_ASPM_L1;
   15728 		str = "L1 is";
   15729 		break;
   15730 	case WM_T_82573:
   15731 	case WM_T_82574:
   15732 	case WM_T_82583:
   15733 		/*
   15734 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15735 		 *
    15736 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15737 		 * some chipsets.  The 82574 and 82583 documentation says
    15738 		 * that disabling L0s only on those specific chipsets is
    15739 		 * sufficient, but we do as the Intel em driver does.
   15740 		 *
   15741 		 * References:
   15742 		 * Errata 8 of the Specification Update of i82573.
   15743 		 * Errata 20 of the Specification Update of i82574.
   15744 		 * Errata 9 of the Specification Update of i82583.
   15745 		 */
   15746 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15747 		str = "L0s and L1 are";
   15748 		break;
   15749 	default:
   15750 		return;
   15751 	}
   15752 
   15753 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15754 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15755 	reg &= ~mask;
   15756 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15757 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15758 
   15759 	/* Print only in wm_attach() */
   15760 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15761 		aprint_verbose_dev(sc->sc_dev,
   15762 		    "ASPM %s disabled to workaround the errata.\n", str);
   15763 }
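
/*
 * Illustrative effect on the PCIe Link Control register (ASPM control
 * is LCSR bits 1:0 per the PCIe spec): with mask = PCIE_LCSR_ASPM_L1 |
 * PCIE_LCSR_ASPM_L0S the read-modify-write above clears both bits and
 * disables ASPM entirely; with PCIE_LCSR_ASPM_L1 alone only L1 entry
 * is forbidden and L0s is left as configured by firmware.
 */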
   15764 
   15765 /* LPLU */
   15766 
   15767 static void
   15768 wm_lplu_d0_disable(struct wm_softc *sc)
   15769 {
   15770 	struct mii_data *mii = &sc->sc_mii;
   15771 	uint32_t reg;
   15772 	uint16_t phyval;
   15773 
   15774 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15775 		device_xname(sc->sc_dev), __func__));
   15776 
   15777 	if (sc->sc_phytype == WMPHY_IFE)
   15778 		return;
   15779 
   15780 	switch (sc->sc_type) {
   15781 	case WM_T_82571:
   15782 	case WM_T_82572:
   15783 	case WM_T_82573:
   15784 	case WM_T_82575:
   15785 	case WM_T_82576:
   15786 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15787 		phyval &= ~PMR_D0_LPLU;
   15788 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15789 		break;
   15790 	case WM_T_82580:
   15791 	case WM_T_I350:
   15792 	case WM_T_I210:
   15793 	case WM_T_I211:
   15794 		reg = CSR_READ(sc, WMREG_PHPM);
   15795 		reg &= ~PHPM_D0A_LPLU;
   15796 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15797 		break;
   15798 	case WM_T_82574:
   15799 	case WM_T_82583:
   15800 	case WM_T_ICH8:
   15801 	case WM_T_ICH9:
   15802 	case WM_T_ICH10:
   15803 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15804 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15805 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15806 		CSR_WRITE_FLUSH(sc);
   15807 		break;
   15808 	case WM_T_PCH:
   15809 	case WM_T_PCH2:
   15810 	case WM_T_PCH_LPT:
   15811 	case WM_T_PCH_SPT:
   15812 	case WM_T_PCH_CNP:
   15813 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15814 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15815 		if (wm_phy_resetisblocked(sc) == false)
   15816 			phyval |= HV_OEM_BITS_ANEGNOW;
   15817 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15818 		break;
   15819 	default:
   15820 		break;
   15821 	}
   15822 }
   15823 
   15824 /* EEE */
   15825 
   15826 static int
   15827 wm_set_eee_i350(struct wm_softc *sc)
   15828 {
   15829 	struct ethercom *ec = &sc->sc_ethercom;
   15830 	uint32_t ipcnfg, eeer;
   15831 	uint32_t ipcnfg_mask
   15832 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15833 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15834 
   15835 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15836 
   15837 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15838 	eeer = CSR_READ(sc, WMREG_EEER);
   15839 
   15840 	/* Enable or disable per user setting */
   15841 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15842 		ipcnfg |= ipcnfg_mask;
   15843 		eeer |= eeer_mask;
   15844 	} else {
   15845 		ipcnfg &= ~ipcnfg_mask;
   15846 		eeer &= ~eeer_mask;
   15847 	}
   15848 
   15849 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15850 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15851 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15852 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15853 
   15854 	return 0;
   15855 }
   15856 
   15857 static int
   15858 wm_set_eee_pchlan(struct wm_softc *sc)
   15859 {
   15860 	device_t dev = sc->sc_dev;
   15861 	struct ethercom *ec = &sc->sc_ethercom;
   15862 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15863 	int rv = 0;
   15864 
   15865 	switch (sc->sc_phytype) {
   15866 	case WMPHY_82579:
   15867 		lpa = I82579_EEE_LP_ABILITY;
   15868 		pcs_status = I82579_EEE_PCS_STATUS;
   15869 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15870 		break;
   15871 	case WMPHY_I217:
   15872 		lpa = I217_EEE_LP_ABILITY;
   15873 		pcs_status = I217_EEE_PCS_STATUS;
   15874 		adv_addr = I217_EEE_ADVERTISEMENT;
   15875 		break;
   15876 	default:
   15877 		return 0;
   15878 	}
   15879 
   15880 	if (sc->phy.acquire(sc)) {
   15881 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15882 		return 0;
   15883 	}
   15884 
   15885 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15886 	if (rv != 0)
   15887 		goto release;
   15888 
   15889 	/* Clear bits that enable EEE in various speeds */
   15890 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15891 
   15892 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15893 		/* Save off link partner's EEE ability */
   15894 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15895 		if (rv != 0)
   15896 			goto release;
   15897 
   15898 		/* Read EEE advertisement */
   15899 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15900 			goto release;
   15901 
   15902 		/*
   15903 		 * Enable EEE only for speeds in which the link partner is
   15904 		 * EEE capable and for which we advertise EEE.
   15905 		 */
   15906 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15907 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15908 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15909 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15910 			if ((data & ANLPAR_TX_FD) != 0)
   15911 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15912 			else {
   15913 				/*
   15914 				 * EEE is not supported in 100Half, so ignore
   15915 				 * partner's EEE in 100 ability if full-duplex
   15916 				 * is not advertised.
   15917 				 */
   15918 				sc->eee_lp_ability
   15919 				    &= ~AN_EEEADVERT_100_TX;
   15920 			}
   15921 		}
   15922 	}
   15923 
   15924 	if (sc->sc_phytype == WMPHY_82579) {
   15925 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15926 		if (rv != 0)
   15927 			goto release;
   15928 
   15929 		data &= ~I82579_LPI_PLL_SHUT_100;
   15930 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15931 	}
   15932 
   15933 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15934 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15935 		goto release;
   15936 
   15937 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15938 release:
   15939 	sc->phy.release(sc);
   15940 
   15941 	return rv;
   15942 }
   15943 
   15944 static int
   15945 wm_set_eee(struct wm_softc *sc)
   15946 {
   15947 	struct ethercom *ec = &sc->sc_ethercom;
   15948 
   15949 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15950 		return 0;
   15951 
   15952 	if (sc->sc_type == WM_T_I354) {
   15953 		/* I354 uses an external PHY */
   15954 		return 0; /* not yet */
   15955 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15956 		return wm_set_eee_i350(sc);
   15957 	else if (sc->sc_type >= WM_T_PCH2)
   15958 		return wm_set_eee_pchlan(sc);
   15959 
   15960 	return 0;
   15961 }
   15962 
   15963 /*
   15964  * Workarounds (mainly PHY related).
   15965  * Basically, PHY's workarounds are in the PHY drivers.
   15966  */
   15967 
   15968 /* Work-around for 82566 Kumeran PCS lock loss */
   15969 static int
   15970 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15971 {
   15972 	struct mii_data *mii = &sc->sc_mii;
   15973 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15974 	int i, reg, rv;
   15975 	uint16_t phyreg;
   15976 
   15977 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15978 		device_xname(sc->sc_dev), __func__));
   15979 
   15980 	/* If the link is not up, do nothing */
   15981 	if ((status & STATUS_LU) == 0)
   15982 		return 0;
   15983 
    15984 	/* Nothing to do if the link speed is other than 1Gbps */
   15985 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15986 		return 0;
   15987 
   15988 	for (i = 0; i < 10; i++) {
   15989 		/* read twice */
   15990 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15991 		if (rv != 0)
   15992 			return rv;
   15993 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15994 		if (rv != 0)
   15995 			return rv;
   15996 
   15997 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15998 			goto out;	/* GOOD! */
   15999 
   16000 		/* Reset the PHY */
   16001 		wm_reset_phy(sc);
   16002 		delay(5*1000);
   16003 	}
   16004 
   16005 	/* Disable GigE link negotiation */
   16006 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16007 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16008 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16009 
   16010 	/*
   16011 	 * Call gig speed drop workaround on Gig disable before accessing
   16012 	 * any PHY registers.
   16013 	 */
   16014 	wm_gig_downshift_workaround_ich8lan(sc);
   16015 
   16016 out:
   16017 	return 0;
   16018 }
   16019 
   16020 /*
   16021  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16022  *  @sc: pointer to the HW structure
   16023  *
    16024  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16025  *  LPLU, Gig disable, MDIC PHY reset):
   16026  *    1) Set Kumeran Near-end loopback
   16027  *    2) Clear Kumeran Near-end loopback
   16028  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16029  */
   16030 static void
   16031 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16032 {
   16033 	uint16_t kmreg;
   16034 
   16035 	/* Only for igp3 */
   16036 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16037 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16038 			return;
   16039 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16040 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16041 			return;
   16042 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16043 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16044 	}
   16045 }
   16046 
   16047 /*
   16048  * Workaround for pch's PHYs
   16049  * XXX should be moved to new PHY driver?
   16050  */
   16051 static int
   16052 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16053 {
   16054 	device_t dev = sc->sc_dev;
   16055 	struct mii_data *mii = &sc->sc_mii;
   16056 	struct mii_softc *child;
   16057 	uint16_t phy_data, phyrev = 0;
   16058 	int phytype = sc->sc_phytype;
   16059 	int rv;
   16060 
   16061 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16062 		device_xname(dev), __func__));
   16063 	KASSERT(sc->sc_type == WM_T_PCH);
   16064 
   16065 	/* Set MDIO slow mode before any other MDIO access */
   16066 	if (phytype == WMPHY_82577)
   16067 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16068 			return rv;
   16069 
   16070 	child = LIST_FIRST(&mii->mii_phys);
   16071 	if (child != NULL)
   16072 		phyrev = child->mii_mpd_rev;
   16073 
    16074 	/* (82577 && (PHY rev 1 or 2)) || (82578 && PHY rev 1) */
   16075 	if ((child != NULL) &&
   16076 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16077 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16078 		/* Disable generation of early preamble (0x4431) */
   16079 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16080 		    &phy_data);
   16081 		if (rv != 0)
   16082 			return rv;
   16083 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16084 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16085 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16086 		    phy_data);
   16087 		if (rv != 0)
   16088 			return rv;
   16089 
   16090 		/* Preamble tuning for SSC */
   16091 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16092 		if (rv != 0)
   16093 			return rv;
   16094 	}
   16095 
   16096 	/* 82578 */
   16097 	if (phytype == WMPHY_82578) {
   16098 		/*
   16099 		 * Return registers to default by doing a soft reset then
   16100 		 * writing 0x3140 to the control register
   16101 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16102 		 */
   16103 		if ((child != NULL) && (phyrev < 2)) {
   16104 			PHY_RESET(child);
   16105 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16106 			if (rv != 0)
   16107 				return rv;
   16108 		}
   16109 	}
   16110 
   16111 	/* Select page 0 */
   16112 	if ((rv = sc->phy.acquire(sc)) != 0)
   16113 		return rv;
   16114 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16115 	sc->phy.release(sc);
   16116 	if (rv != 0)
   16117 		return rv;
   16118 
   16119 	/*
   16120 	 * Configure the K1 Si workaround during phy reset assuming there is
   16121 	 * link so that it disables K1 if link is in 1Gbps.
   16122 	 */
   16123 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16124 		return rv;
   16125 
   16126 	/* Workaround for link disconnects on a busy hub in half duplex */
   16127 	rv = sc->phy.acquire(sc);
   16128 	if (rv)
   16129 		return rv;
   16130 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16131 	if (rv)
   16132 		goto release;
   16133 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16134 	    phy_data & 0x00ff);
   16135 	if (rv)
   16136 		goto release;
   16137 
   16138 	/* Set MSE higher to enable link to stay up when noise is high */
   16139 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16140 release:
   16141 	sc->phy.release(sc);
   16142 
   16143 	return rv;
   16144 }
   16145 
   16146 /*
   16147  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16148  *  @sc:   pointer to the HW structure
   16149  */
   16150 static void
   16151 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16152 {
   16153 
   16154 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16155 		device_xname(sc->sc_dev), __func__));
   16156 
   16157 	if (sc->phy.acquire(sc) != 0)
   16158 		return;
   16159 
   16160 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16161 
   16162 	sc->phy.release(sc);
   16163 }
   16164 
   16165 static void
   16166 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16167 {
   16168 	device_t dev = sc->sc_dev;
   16169 	uint32_t mac_reg;
   16170 	uint16_t i, wuce;
   16171 	int count;
   16172 
   16173 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16174 		device_xname(dev), __func__));
   16175 
   16176 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16177 		return;
   16178 
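	/*
	 * Each 48 bit receive address spans two 32 bit MAC registers:
	 * RAL holds the first four bytes (least significant byte first)
	 * and the low half of RAH the last two, next to the address
	 * valid bit (RAL_AV).  E.g. for 00:11:22:33:44:55, RAL reads
	 * 0x33221100 and the low half of RAH reads 0x5544.  Below they
	 * are copied into four 16 bit PHY wakeup registers per entry.
	 */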
   16179 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16180 	count = wm_rar_count(sc);
   16181 	for (i = 0; i < count; i++) {
   16182 		uint16_t lo, hi;
   16183 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16184 		lo = (uint16_t)(mac_reg & 0xffff);
   16185 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16186 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16187 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16188 
   16189 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16190 		lo = (uint16_t)(mac_reg & 0xffff);
   16191 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16192 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16193 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16194 	}
   16195 
   16196 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16197 }
   16198 
   16199 /*
   16200  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16201  *  with 82579 PHY
   16202  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16203  */
   16204 static int
   16205 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16206 {
   16207 	device_t dev = sc->sc_dev;
   16208 	int rar_count;
   16209 	int rv;
   16210 	uint32_t mac_reg;
   16211 	uint16_t dft_ctrl, data;
   16212 	uint16_t i;
   16213 
   16214 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16215 		device_xname(dev), __func__));
   16216 
   16217 	if (sc->sc_type < WM_T_PCH2)
   16218 		return 0;
   16219 
   16220 	/* Acquire PHY semaphore */
   16221 	rv = sc->phy.acquire(sc);
   16222 	if (rv != 0)
   16223 		return rv;
   16224 
   16225 	/* Disable Rx path while enabling/disabling workaround */
   16226 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16227 	if (rv != 0)
   16228 		goto out;
   16229 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16230 	    dft_ctrl | (1 << 14));
   16231 	if (rv != 0)
   16232 		goto out;
   16233 
   16234 	if (enable) {
		/*
		 * Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC.
		 */
   16238 		rar_count = wm_rar_count(sc);
   16239 		for (i = 0; i < rar_count; i++) {
   16240 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16241 			uint32_t addr_high, addr_low;
   16242 
   16243 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16244 			if (!(addr_high & RAL_AV))
   16245 				continue;
   16246 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16247 			mac_addr[0] = (addr_low & 0xFF);
   16248 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16249 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16250 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16251 			mac_addr[4] = (addr_high & 0xFF);
   16252 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16253 
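			/*
			 * RAICC apparently holds the initial CRC value for
			 * this receive address; seed it with the
			 * complemented little-endian CRC32 of the address so
			 * that hardware CRC handling still works once CRC
			 * stripping (RCTL_SECRC) is enabled below.
			 */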
   16254 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16255 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16256 		}
   16257 
   16258 		/* Write Rx addresses to the PHY */
   16259 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16260 	}
   16261 
   16262 	/*
   16263 	 * If enable ==
   16264 	 *	true: Enable jumbo frame workaround in the MAC.
   16265 	 *	false: Write MAC register values back to h/w defaults.
   16266 	 */
   16267 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16268 	if (enable) {
   16269 		mac_reg &= ~(1 << 14);
   16270 		mac_reg |= (7 << 15);
   16271 	} else
   16272 		mac_reg &= ~(0xf << 14);
   16273 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16274 
   16275 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16276 	if (enable) {
   16277 		mac_reg |= RCTL_SECRC;
   16278 		sc->sc_rctl |= RCTL_SECRC;
   16279 		sc->sc_flags |= WM_F_CRC_STRIP;
   16280 	} else {
   16281 		mac_reg &= ~RCTL_SECRC;
   16282 		sc->sc_rctl &= ~RCTL_SECRC;
   16283 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16284 	}
   16285 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16286 
   16287 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16288 	if (rv != 0)
   16289 		goto out;
   16290 	if (enable)
   16291 		data |= 1 << 0;
   16292 	else
   16293 		data &= ~(1 << 0);
   16294 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16295 	if (rv != 0)
   16296 		goto out;
   16297 
   16298 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16299 	if (rv != 0)
   16300 		goto out;
	/*
	 * XXX FreeBSD and Linux do the same thing: they set the same value
	 * in both the enable case and the disable case. Is that correct?
	 */
   16305 	data &= ~(0xf << 8);
   16306 	data |= (0xb << 8);
   16307 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16308 	if (rv != 0)
   16309 		goto out;
   16310 
   16311 	/*
   16312 	 * If enable ==
   16313 	 *	true: Enable jumbo frame workaround in the PHY.
   16314 	 *	false: Write PHY register values back to h/w defaults.
   16315 	 */
   16316 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16317 	if (rv != 0)
   16318 		goto out;
   16319 	data &= ~(0x7F << 5);
   16320 	if (enable)
   16321 		data |= (0x37 << 5);
   16322 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16323 	if (rv != 0)
   16324 		goto out;
   16325 
   16326 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16327 	if (rv != 0)
   16328 		goto out;
   16329 	if (enable)
   16330 		data &= ~(1 << 13);
   16331 	else
   16332 		data |= (1 << 13);
   16333 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16334 	if (rv != 0)
   16335 		goto out;
   16336 
   16337 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16338 	if (rv != 0)
   16339 		goto out;
   16340 	data &= ~(0x3FF << 2);
   16341 	if (enable)
   16342 		data |= (I82579_TX_PTR_GAP << 2);
   16343 	else
   16344 		data |= (0x8 << 2);
   16345 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16346 	if (rv != 0)
   16347 		goto out;
   16348 
   16349 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16350 	    enable ? 0xf100 : 0x7e00);
   16351 	if (rv != 0)
   16352 		goto out;
   16353 
   16354 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16355 	if (rv != 0)
   16356 		goto out;
   16357 	if (enable)
   16358 		data |= 1 << 10;
   16359 	else
   16360 		data &= ~(1 << 10);
   16361 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16362 	if (rv != 0)
   16363 		goto out;
   16364 
   16365 	/* Re-enable Rx path after enabling/disabling workaround */
   16366 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16367 	    dft_ctrl & ~(1 << 14));
   16368 
   16369 out:
   16370 	sc->phy.release(sc);
   16371 
   16372 	return rv;
   16373 }
   16374 
   16375 /*
 *  wm_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
   16377  *  done after every PHY reset.
   16378  */
   16379 static int
   16380 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16381 {
   16382 	device_t dev = sc->sc_dev;
   16383 	int rv;
   16384 
   16385 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16386 		device_xname(dev), __func__));
   16387 	KASSERT(sc->sc_type == WM_T_PCH2);
   16388 
   16389 	/* Set MDIO slow mode before any other MDIO access */
   16390 	rv = wm_set_mdio_slow_mode_hv(sc);
   16391 	if (rv != 0)
   16392 		return rv;
   16393 
   16394 	rv = sc->phy.acquire(sc);
   16395 	if (rv != 0)
   16396 		return rv;
   16397 	/* Set MSE higher to enable link to stay up when noise is high */
   16398 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16399 	if (rv != 0)
   16400 		goto release;
	/* Drop the link after the MSE threshold has been reached 5 times */
   16402 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16403 release:
   16404 	sc->phy.release(sc);
   16405 
   16406 	return rv;
   16407 }
   16408 
/*
 *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
 *  indications, preventing further DMA write requests.  Work around the
 *  issue by disabling the de-assertion of the clock request when in
 *  1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 */
   16419 static int
   16420 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16421 {
   16422 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16423 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16424 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16425 	uint16_t phyreg;
   16426 
   16427 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
   16431 		if (rv != 0)
   16432 			goto release;
   16433 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16434 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16435 		if (rv != 0)
   16436 			goto release;
   16437 		delay(20);
   16438 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16439 
   16440 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16441 		    &phyreg);
   16442 release:
   16443 		sc->phy.release(sc);
   16444 		return rv;
   16445 	}
   16446 
   16447 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16448 
   16449 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16450 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16451 	    || !link
   16452 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16453 		goto update_fextnvm6;
   16454 
   16455 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16456 
   16457 	/* Clear link status transmit timeout */
   16458 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16459 	if (speed == STATUS_SPEED_100) {
   16460 		/* Set inband Tx timeout to 5x10us for 100Half */
   16461 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16462 
   16463 		/* Do not extend the K1 entry latency for 100Half */
   16464 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16465 	} else {
   16466 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16467 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16468 
   16469 		/* Extend the K1 entry latency for 10 Mbps */
   16470 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16471 	}
   16472 
   16473 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16474 
   16475 update_fextnvm6:
   16476 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16477 	return 0;
   16478 }
   16479 
   16480 /*
   16481  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16482  *  @sc:   pointer to the HW structure
   16483  *  @link: link up bool flag
   16484  *
   16485  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   16487  *  If link is down, the function will restore the default K1 setting located
   16488  *  in the NVM.
   16489  */
   16490 static int
   16491 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16492 {
   16493 	int k1_enable = sc->sc_nvm_k1_enabled;
   16494 
   16495 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16496 		device_xname(sc->sc_dev), __func__));
   16497 
   16498 	if (sc->phy.acquire(sc) != 0)
   16499 		return -1;
   16500 
   16501 	if (link) {
   16502 		k1_enable = 0;
   16503 
   16504 		/* Link stall fix for link up */
   16505 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16506 		    0x0100);
   16507 	} else {
   16508 		/* Link stall fix for link down */
   16509 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16510 		    0x4100);
   16511 	}
   16512 
   16513 	wm_configure_k1_ich8lan(sc, k1_enable);
   16514 	sc->phy.release(sc);
   16515 
   16516 	return 0;
   16517 }
   16518 
   16519 /*
   16520  *  wm_k1_workaround_lv - K1 Si workaround
   16521  *  @sc:   pointer to the HW structure
   16522  *
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
 *  Disable K1 for 1000 and 100 speeds.
   16525  */
   16526 static int
   16527 wm_k1_workaround_lv(struct wm_softc *sc)
   16528 {
   16529 	uint32_t reg;
   16530 	uint16_t phyreg;
   16531 	int rv;
   16532 
   16533 	if (sc->sc_type != WM_T_PCH2)
   16534 		return 0;
   16535 
   16536 	/* Set K1 beacon duration based on 10Mbps speed */
   16537 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16538 	if (rv != 0)
   16539 		return rv;
   16540 
   16541 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16542 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16543 		if (phyreg &
   16544 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   16546 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16547 			    &phyreg);
   16548 			if (rv != 0)
   16549 				return rv;
   16550 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16551 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16552 			    phyreg);
   16553 			if (rv != 0)
   16554 				return rv;
   16555 		} else {
   16556 			/* For 10Mbps */
   16557 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16558 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16559 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16560 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16561 		}
   16562 	}
   16563 
   16564 	return 0;
   16565 }
   16566 
   16567 /*
   16568  *  wm_link_stall_workaround_hv - Si workaround
   16569  *  @sc: pointer to the HW structure
   16570  *
   16571  *  This function works around a Si bug where the link partner can get
   16572  *  a link up indication before the PHY does. If small packets are sent
   16573  *  by the link partner they can be placed in the packet buffer without
 *  being properly accounted for by the PHY and will stall, preventing
   16575  *  further packets from being received.  The workaround is to clear the
   16576  *  packet buffer after the PHY detects link up.
   16577  */
   16578 static int
   16579 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16580 {
   16581 	uint16_t phyreg;
   16582 
   16583 	if (sc->sc_phytype != WMPHY_82578)
   16584 		return 0;
   16585 
	/* Do not apply the workaround if the PHY is in loopback (BMCR_LOOP) */
   16587 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16588 	if ((phyreg & BMCR_LOOP) != 0)
   16589 		return 0;
   16590 
   16591 	/* Check if link is up and at 1Gbps */
   16592 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16593 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16594 	    | BM_CS_STATUS_SPEED_MASK;
   16595 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16596 		| BM_CS_STATUS_SPEED_1000))
   16597 		return 0;
   16598 
   16599 	delay(200 * 1000);	/* XXX too big */
   16600 
	/* Flush the packets in the FIFO buffer */
   16602 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16603 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16604 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16605 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16606 
   16607 	return 0;
   16608 }
   16609 
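/*
 * Set the KMRN MDIO interface to slow mode.  Presumably the 82577/82578
 * PHY can't be accessed reliably at the normal MDIO clock rate right
 * after reset, which is why callers do this before any other MDIO access.
 */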
   16610 static int
   16611 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16612 {
   16613 	int rv;
   16614 	uint16_t reg;
   16615 
   16616 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16617 	if (rv != 0)
   16618 		return rv;
   16619 
   16620 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16621 	    reg | HV_KMRN_MDIO_SLOW);
   16622 }
   16623 
   16624 /*
   16625  *  wm_configure_k1_ich8lan - Configure K1 power state
   16626  *  @sc: pointer to the HW structure
   16627  *  @enable: K1 state to configure
   16628  *
   16629  *  Configure the K1 power state based on the provided parameter.
   16630  *  Assumes semaphore already acquired.
   16631  */
   16632 static void
   16633 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16634 {
   16635 	uint32_t ctrl, ctrl_ext, tmp;
   16636 	uint16_t kmreg;
   16637 	int rv;
   16638 
   16639 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16640 
   16641 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16642 	if (rv != 0)
   16643 		return;
   16644 
   16645 	if (k1_enable)
   16646 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16647 	else
   16648 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16649 
   16650 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16651 	if (rv != 0)
   16652 		return;
   16653 
   16654 	delay(20);
   16655 
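	/*
	 * Briefly force the MAC speed with PHY speed indication bypassed
	 * (CTRL_FRCSPD together with CTRL_EXT_SPD_BYPS), then restore the
	 * original settings; this appears to be what makes the new K1
	 * setting take effect.
	 */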
   16656 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16657 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16658 
   16659 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16660 	tmp |= CTRL_FRCSPD;
   16661 
   16662 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16663 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16664 	CSR_WRITE_FLUSH(sc);
   16665 	delay(20);
   16666 
   16667 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16668 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16669 	CSR_WRITE_FLUSH(sc);
   16670 	delay(20);
   16671 
   16672 	return;
   16673 }
   16674 
/* Special case - the 82575 needs manual init ... */
   16676 static void
   16677 wm_reset_init_script_82575(struct wm_softc *sc)
   16678 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
   16683 
   16684 	/* SerDes configuration via SERDESCTRL */
   16685 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16686 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16687 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16688 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16689 
   16690 	/* CCM configuration via CCMCTL register */
   16691 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16692 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16693 
   16694 	/* PCIe lanes configuration */
   16695 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16696 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16697 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16698 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16699 
   16700 	/* PCIe PLL Configuration */
   16701 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16702 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16703 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16704 }
   16705 
   16706 static void
   16707 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16708 {
   16709 	uint32_t reg;
   16710 	uint16_t nvmword;
   16711 	int rv;
   16712 
   16713 	if (sc->sc_type != WM_T_82580)
   16714 		return;
   16715 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16716 		return;
   16717 
   16718 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16719 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16720 	if (rv != 0) {
   16721 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16722 		    __func__);
   16723 		return;
   16724 	}
   16725 
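	/*
	 * Mirror the NVM's CFG3_PORTA word into MDICNFG.  MDICNFG_DEST
	 * presumably selects the external MDIO interface for this port and
	 * MDICNFG_COM_MDIO an MDIO interface shared between ports.
	 */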
   16726 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16727 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16728 		reg |= MDICNFG_DEST;
   16729 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16730 		reg |= MDICNFG_COM_MDIO;
   16731 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16732 }
   16733 
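/*
 * A PHY ID register reads as all zeros or all ones when the PHY is absent
 * or not responding, so treat both patterns as invalid.
 */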
   16734 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16735 
   16736 static bool
   16737 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16738 {
   16739 	uint32_t reg;
   16740 	uint16_t id1, id2;
   16741 	int i, rv;
   16742 
   16743 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16744 		device_xname(sc->sc_dev), __func__));
   16745 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16746 
   16747 	id1 = id2 = 0xffff;
   16748 	for (i = 0; i < 2; i++) {
   16749 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16750 		    &id1);
   16751 		if ((rv != 0) || MII_INVALIDID(id1))
   16752 			continue;
   16753 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16754 		    &id2);
   16755 		if ((rv != 0) || MII_INVALIDID(id2))
   16756 			continue;
   16757 		break;
   16758 	}
   16759 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16760 		goto out;
   16761 
	/*
	 * In case the PHY needs to be in MDIO slow mode,
	 * set slow mode and try to get the PHY ID again.
	 */
   16766 	rv = 0;
   16767 	if (sc->sc_type < WM_T_PCH_LPT) {
   16768 		sc->phy.release(sc);
   16769 		wm_set_mdio_slow_mode_hv(sc);
   16770 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16771 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16772 		sc->phy.acquire(sc);
   16773 	}
   16774 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16775 		device_printf(sc->sc_dev, "XXX return with false\n");
   16776 		return false;
   16777 	}
   16778 out:
   16779 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16780 		/* Only unforce SMBus if ME is not active */
   16781 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16782 			uint16_t phyreg;
   16783 
   16784 			/* Unforce SMBus mode in PHY */
   16785 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16786 			    CV_SMB_CTRL, &phyreg);
   16787 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16788 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16789 			    CV_SMB_CTRL, phyreg);
   16790 
   16791 			/* Unforce SMBus mode in MAC */
   16792 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16793 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16794 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16795 		}
   16796 	}
   16797 	return true;
   16798 }
   16799 
   16800 static void
   16801 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16802 {
   16803 	uint32_t reg;
   16804 	int i;
   16805 
   16806 	/* Set PHY Config Counter to 50msec */
   16807 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16808 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16809 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16810 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16811 
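	/*
	 * CTRL_LANPHYPC_OVERRIDE hands control of the LANPHYPC pin to
	 * software; with CTRL_LANPHYPC_VALUE cleared, the pin is driven
	 * low, which presumably power-cycles the PHY.  The delays and the
	 * CTRL_EXT_LPCD poll below wait for the PHY to come back up.
	 */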
   16812 	/* Toggle LANPHYPC */
   16813 	reg = CSR_READ(sc, WMREG_CTRL);
   16814 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16815 	reg &= ~CTRL_LANPHYPC_VALUE;
   16816 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16817 	CSR_WRITE_FLUSH(sc);
   16818 	delay(1000);
   16819 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16820 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16821 	CSR_WRITE_FLUSH(sc);
   16822 
   16823 	if (sc->sc_type < WM_T_PCH_LPT)
   16824 		delay(50 * 1000);
   16825 	else {
   16826 		i = 20;
   16827 
   16828 		do {
   16829 			delay(5 * 1000);
   16830 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16831 		    && i--);
   16832 
   16833 		delay(30 * 1000);
   16834 	}
   16835 }
   16836 
   16837 static int
   16838 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16839 {
   16840 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16841 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16842 	uint32_t rxa;
   16843 	uint16_t scale = 0, lat_enc = 0;
   16844 	int32_t obff_hwm = 0;
   16845 	int64_t lat_ns, value;
   16846 
   16847 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16848 		device_xname(sc->sc_dev), __func__));
   16849 
   16850 	if (link) {
   16851 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16852 		uint32_t status;
   16853 		uint16_t speed;
   16854 		pcireg_t preg;
   16855 
   16856 		status = CSR_READ(sc, WMREG_STATUS);
   16857 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16858 		case STATUS_SPEED_10:
   16859 			speed = 10;
   16860 			break;
   16861 		case STATUS_SPEED_100:
   16862 			speed = 100;
   16863 			break;
   16864 		case STATUS_SPEED_1000:
   16865 			speed = 1000;
   16866 			break;
   16867 		default:
   16868 			device_printf(sc->sc_dev, "Unknown speed "
   16869 			    "(status = %08x)\n", status);
   16870 			return -1;
   16871 		}
   16872 
   16873 		/* Rx Packet Buffer Allocation size (KB) */
   16874 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16875 
   16876 		/*
   16877 		 * Determine the maximum latency tolerated by the device.
   16878 		 *
   16879 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16880 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16881 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16882 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16883 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16884 		 */
   16885 		lat_ns = ((int64_t)rxa * 1024 -
   16886 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16887 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16888 		if (lat_ns < 0)
   16889 			lat_ns = 0;
   16890 		else
   16891 			lat_ns /= speed;
   16892 		value = lat_ns;
   16893 
   16894 		while (value > LTRV_VALUE) {
			scale++;
   16896 			value = howmany(value, __BIT(5));
   16897 		}
   16898 		if (scale > LTRV_SCALE_MAX) {
   16899 			device_printf(sc->sc_dev,
   16900 			    "Invalid LTR latency scale %d\n", scale);
   16901 			return -1;
   16902 		}
   16903 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
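		/*
		 * Worked example with illustrative numbers: rxa = 24KB,
		 * MTU = 1500, speed = 1000 gives
		 * lat_ns = (24 * 1024 - 2 * (1500 + 14)) * 8 * 1000 / 1000
		 *        = 172384 ns.
		 * That exceeds LTRV_VALUE (the 10-bit maximum, 1023), so
		 * divide by 2^5 per iteration: 172384 -> 5387 -> 169, with
		 * scale = 2.  The encoded latency is then
		 * 169 * 2^(5 * 2) = 173056 ns, slightly above the true
		 * value because howmany() rounds up.
		 */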
   16904 
   16905 		/* Determine the maximum latency tolerated by the platform */
   16906 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16907 		    WM_PCI_LTR_CAP_LPT);
   16908 		max_snoop = preg & 0xffff;
   16909 		max_nosnoop = preg >> 16;
   16910 
   16911 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16912 
   16913 		if (lat_enc > max_ltr_enc) {
   16914 			lat_enc = max_ltr_enc;
   16915 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16916 			    * PCI_LTR_SCALETONS(
   16917 				    __SHIFTOUT(lat_enc,
   16918 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16919 		}
   16920 
   16921 		if (lat_ns) {
   16922 			lat_ns *= speed * 1000;
   16923 			lat_ns /= 8;
   16924 			lat_ns /= 1000000000;
   16925 			obff_hwm = (int32_t)(rxa - lat_ns);
   16926 		}
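		/*
		 * The computation above converts the tolerated latency back
		 * into buffer space: lat_ns(ns) * speed(Mb/s) / (8 * 10^6)
		 * is the amount of Rx buffer, in KB, consumed at line rate
		 * during the stall, and the OBFF high water mark is
		 * whatever buffer remains.
		 */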
   16927 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16929 			    "(rxa = %d, lat_ns = %d)\n",
   16930 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16931 			return -1;
   16932 		}
   16933 	}
	/* Use the same latency for Snoop and No-Snoop */
   16935 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16936 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16937 
   16938 	/* Set OBFF high water mark */
   16939 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16940 	reg |= obff_hwm;
   16941 	CSR_WRITE(sc, WMREG_SVT, reg);
   16942 
   16943 	/* Enable OBFF */
   16944 	reg = CSR_READ(sc, WMREG_SVCR);
   16945 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16946 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16947 
   16948 	return 0;
   16949 }
   16950 
   16951 /*
   16952  * I210 Errata 25 and I211 Errata 10
   16953  * Slow System Clock.
   16954  *
 * Note that on NetBSD this function is called in both the FLASH and iNVM
 * cases.
   16956  */
   16957 static int
   16958 wm_pll_workaround_i210(struct wm_softc *sc)
   16959 {
   16960 	uint32_t mdicnfg, wuc;
   16961 	uint32_t reg;
   16962 	pcireg_t pcireg;
   16963 	uint32_t pmreg;
   16964 	uint16_t nvmword, tmp_nvmword;
   16965 	uint16_t phyval;
   16966 	bool wa_done = false;
   16967 	int i, rv = 0;
   16968 
   16969 	/* Get Power Management cap offset */
   16970 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16971 	    &pmreg, NULL) == 0)
   16972 		return -1;
   16973 
   16974 	/* Save WUC and MDICNFG registers */
   16975 	wuc = CSR_READ(sc, WMREG_WUC);
   16976 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16977 
   16978 	reg = mdicnfg & ~MDICNFG_DEST;
   16979 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16980 
   16981 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   16982 		/*
   16983 		 * The default value of the Initialization Control Word 1
   16984 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   16985 		 */
   16986 		nvmword = INVM_DEFAULT_AL;
   16987 	}
   16988 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16989 
   16990 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16991 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16992 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16993 
   16994 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16995 			rv = 0;
   16996 			break; /* OK */
   16997 		} else
   16998 			rv = -1;
   16999 
   17000 		wa_done = true;
   17001 		/* Directly reset the internal PHY */
   17002 		reg = CSR_READ(sc, WMREG_CTRL);
   17003 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17004 
   17005 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17006 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17007 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17008 
   17009 		CSR_WRITE(sc, WMREG_WUC, 0);
   17010 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17011 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17012 
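		/*
		 * Bounce the device through D3hot and back to D0 so that
		 * the autoload word patched into EEARBC_I210 above (with
		 * INVM_PLL_WO_VAL set) is picked up again on the way back
		 * to D0; the original word is restored further below.
		 */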
   17013 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17014 		    pmreg + PCI_PMCSR);
   17015 		pcireg |= PCI_PMCSR_STATE_D3;
   17016 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17017 		    pmreg + PCI_PMCSR, pcireg);
   17018 		delay(1000);
   17019 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17020 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17021 		    pmreg + PCI_PMCSR, pcireg);
   17022 
   17023 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17024 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17025 
   17026 		/* Restore WUC register */
   17027 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17028 	}
   17029 
   17030 	/* Restore MDICNFG setting */
   17031 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17032 	if (wa_done)
   17033 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17034 	return rv;
   17035 }
   17036 
   17037 static void
   17038 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17039 {
   17040 	uint32_t reg;
   17041 
   17042 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17043 		device_xname(sc->sc_dev), __func__));
   17044 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17045 	    || (sc->sc_type == WM_T_PCH_CNP));
   17046 
   17047 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17048 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17049 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17050 
   17051 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17052 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17053 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17054 }
   17055 
   17056 /* Sysctl function */
   17057 #ifdef WM_DEBUG
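/*
 * Handler for the per-device debug sysctl node (the node itself is
 * created elsewhere in this driver).  Reads return the current
 * WM_DEBUG_* flag mask; writes update sc_debug and take effect
 * immediately.
 */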
   17058 static int
   17059 wm_sysctl_debug(SYSCTLFN_ARGS)
   17060 {
   17061 	struct sysctlnode node = *rnode;
   17062 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17063 	uint32_t dflags;
   17064 	int error;
   17065 
   17066 	dflags = sc->sc_debug;
   17067 	node.sysctl_data = &dflags;
   17068 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17069 
   17070 	if (error || newp == NULL)
   17071 		return error;
   17072 
   17073 	sc->sc_debug = dflags;
   17074 
   17075 	return 0;
   17076 }
   17077 #endif
   17078