/*	$NetBSD: if_wm.c,v 1.674 2020/04/09 06:55:51 jdolecek Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.674 2020/04/09 06:55:51 jdolecek Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define CALLOUT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses: one per Tx/Rx
 * queue pair plus one for link-status changes.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
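/*
 * Illustrative vector layout (an assumption for a 4-queue adapter, not
 * a definitive mapping): MSI-X vectors 0..3 would service the Tx/Rx
 * queue pairs and vector 4 the link-status interrupt, i.e.
 * nintrs == 5 <= WM_MAX_NINTR.
 */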

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
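/*
 * Worked example (arithmetic only): the counts above are powers of
 * two, so the masks wrap ring indexes without a modulo.  With
 * WM_NTXDESC(txq) == 4096:
 *
 *	WM_NEXTTX(txq, 4094) == 4095
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0
 */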

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
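/*
 * Worked example (assumed 9018-byte jumbo frame, for illustration):
 * howmany(9018, MCLBYTES) == howmany(9018, 2048) == 5 Rx buffers per
 * frame, so the 256 descriptors above hold roughly 256 / 5 ~= 50
 * jumbo packets in flight.
 */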

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
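/*
 * Illustrative expansion (not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * pastes tokens to declare
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) registers that
 * counter under the name "txq00txdw" (the "%s%02d%s" format above).
 */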

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t 	txq_last_hw_cmd;
	uint8_t 	txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
	krndsource_t rnd_source;	/* random source */
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
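/*
 * Sketch of the tail-pointer idiom above (descriptive only): after
 * WM_RXCHAIN_RESET(), rxq_tailp points at rxq_head, so the first
 * WM_RXCHAIN_LINK(rxq, m) stores m into rxq_head; every later link
 * stores into the previous mbuf's m_next.  Appending therefore never
 * walks the chain.
 */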

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
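/*
 * Typical use of the flush macro (a sketch, not a specific call site
 * from this file): reading STATUS forces posted writes out to the
 * device before the driver proceeds, e.g.
 *
 *	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
 *	CSR_WRITE_FLUSH(sc);
 */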

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
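/*
 * Worked example (assumed values): with txq_desc_dma == 0x123400000
 * and txq_descsize == 16, WM_CDTXADDR(txq, 2) == 0x123400020, which
 * splits into WM_CDTXADDR_LO() == 0x23400020 and
 * WM_CDTXADDR_HI() == 0x1 for the 32-bit halves of the descriptor
 * base registers.  With a 32-bit bus_addr_t the HI half is always 0.
 */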

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1324 	  "82801I (C) LAN Controller",
   1325 	  WM_T_ICH9,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1327 	  "82801I mobile LAN Controller",
   1328 	  WM_T_ICH9,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1330 	  "82801I mobile (V) LAN Controller",
   1331 	  WM_T_ICH9,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1333 	  "82801I mobile (AMT) LAN Controller",
   1334 	  WM_T_ICH9,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1336 	  "82567LM-4 LAN Controller",
   1337 	  WM_T_ICH9,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1339 	  "82567LM-2 LAN Controller",
   1340 	  WM_T_ICH10,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1342 	  "82567LF-2 LAN Controller",
   1343 	  WM_T_ICH10,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1345 	  "82567LM-3 LAN Controller",
   1346 	  WM_T_ICH10,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1348 	  "82567LF-3 LAN Controller",
   1349 	  WM_T_ICH10,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1351 	  "82567V-2 LAN Controller",
   1352 	  WM_T_ICH10,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1354 	  "82567V-3? LAN Controller",
   1355 	  WM_T_ICH10,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1357 	  "HANKSVILLE LAN Controller",
   1358 	  WM_T_ICH10,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1360 	  "PCH LAN (82577LM) Controller",
   1361 	  WM_T_PCH,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1363 	  "PCH LAN (82577LC) Controller",
   1364 	  WM_T_PCH,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1366 	  "PCH LAN (82578DM) Controller",
   1367 	  WM_T_PCH,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1369 	  "PCH LAN (82578DC) Controller",
   1370 	  WM_T_PCH,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1372 	  "PCH2 LAN (82579LM) Controller",
   1373 	  WM_T_PCH2,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1375 	  "PCH2 LAN (82579V) Controller",
   1376 	  WM_T_PCH2,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1378 	  "82575EB dual-1000baseT Ethernet",
   1379 	  WM_T_82575,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1381 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1382 	  WM_T_82575,		WMP_F_SERDES },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1384 	  "82575GB quad-1000baseT Ethernet",
   1385 	  WM_T_82575,		WMP_F_COPPER },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1387 	  "82575GB quad-1000baseT Ethernet (PM)",
   1388 	  WM_T_82575,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1390 	  "82576 1000BaseT Ethernet",
   1391 	  WM_T_82576,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1393 	  "82576 1000BaseX Ethernet",
   1394 	  WM_T_82576,		WMP_F_FIBER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1397 	  "82576 gigabit Ethernet (SERDES)",
   1398 	  WM_T_82576,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1401 	  "82576 quad-1000BaseT Ethernet",
   1402 	  WM_T_82576,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1405 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1406 	  WM_T_82576,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1409 	  "82576 gigabit Ethernet",
   1410 	  WM_T_82576,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1413 	  "82576 gigabit Ethernet (SERDES)",
   1414 	  WM_T_82576,		WMP_F_SERDES },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1416 	  "82576 quad-gigabit Ethernet (SERDES)",
   1417 	  WM_T_82576,		WMP_F_SERDES },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1420 	  "82580 1000BaseT Ethernet",
   1421 	  WM_T_82580,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1423 	  "82580 1000BaseX Ethernet",
   1424 	  WM_T_82580,		WMP_F_FIBER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1427 	  "82580 1000BaseT Ethernet (SERDES)",
   1428 	  WM_T_82580,		WMP_F_SERDES },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1431 	  "82580 gigabit Ethernet (SGMII)",
   1432 	  WM_T_82580,		WMP_F_COPPER },
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1434 	  "82580 dual-1000BaseT Ethernet",
   1435 	  WM_T_82580,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1438 	  "82580 quad-1000BaseX Ethernet",
   1439 	  WM_T_82580,		WMP_F_FIBER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1442 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1443 	  WM_T_82580,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1446 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1447 	  WM_T_82580,		WMP_F_SERDES },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1450 	  "DH89XXCC 1000BASE-KX Ethernet",
   1451 	  WM_T_82580,		WMP_F_SERDES },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1454 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1455 	  WM_T_82580,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1458 	  "I350 Gigabit Network Connection",
   1459 	  WM_T_I350,		WMP_F_COPPER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1462 	  "I350 Gigabit Fiber Network Connection",
   1463 	  WM_T_I350,		WMP_F_FIBER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1466 	  "I350 Gigabit Backplane Connection",
   1467 	  WM_T_I350,		WMP_F_SERDES },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1470 	  "I350 Quad Port Gigabit Ethernet",
   1471 	  WM_T_I350,		WMP_F_SERDES },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1474 	  "I350 Gigabit Connection",
   1475 	  WM_T_I350,		WMP_F_COPPER },
   1476 
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1478 	  "I354 Gigabit Ethernet (KX)",
   1479 	  WM_T_I354,		WMP_F_SERDES },
   1480 
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1482 	  "I354 Gigabit Ethernet (SGMII)",
   1483 	  WM_T_I354,		WMP_F_COPPER },
   1484 
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1486 	  "I354 Gigabit Ethernet (2.5G)",
   1487 	  WM_T_I354,		WMP_F_COPPER },
   1488 
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1490 	  "I210-T1 Ethernet Server Adapter",
   1491 	  WM_T_I210,		WMP_F_COPPER },
   1492 
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1494 	  "I210 Ethernet (Copper OEM)",
   1495 	  WM_T_I210,		WMP_F_COPPER },
   1496 
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1498 	  "I210 Ethernet (Copper IT)",
   1499 	  WM_T_I210,		WMP_F_COPPER },
   1500 
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1502 	  "I210 Ethernet (Copper, FLASH less)",
   1503 	  WM_T_I210,		WMP_F_COPPER },
   1504 
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1506 	  "I210 Gigabit Ethernet (Fiber)",
   1507 	  WM_T_I210,		WMP_F_FIBER },
   1508 
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1510 	  "I210 Gigabit Ethernet (SERDES)",
   1511 	  WM_T_I210,		WMP_F_SERDES },
   1512 
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1514 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1515 	  WM_T_I210,		WMP_F_SERDES },
   1516 
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1518 	  "I210 Gigabit Ethernet (SGMII)",
   1519 	  WM_T_I210,		WMP_F_COPPER },
   1520 
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1522 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1523 	  WM_T_I210,		WMP_F_COPPER },
   1524 
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1526 	  "I211 Ethernet (COPPER)",
   1527 	  WM_T_I211,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1529 	  "I217 V Ethernet Connection",
   1530 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1532 	  "I217 LM Ethernet Connection",
   1533 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1535 	  "I218 V Ethernet Connection",
   1536 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1538 	  "I218 V Ethernet Connection",
   1539 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1541 	  "I218 V Ethernet Connection",
   1542 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1544 	  "I218 LM Ethernet Connection",
   1545 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1547 	  "I218 LM Ethernet Connection",
   1548 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1550 	  "I218 LM Ethernet Connection",
   1551 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1553 	  "I219 LM Ethernet Connection",
   1554 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1556 	  "I219 LM Ethernet Connection",
   1557 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1559 	  "I219 LM Ethernet Connection",
   1560 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1565 	  "I219 LM Ethernet Connection",
   1566 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1568 	  "I219 LM Ethernet Connection",
   1569 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1571 	  "I219 LM Ethernet Connection",
   1572 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1574 	  "I219 LM Ethernet Connection",
   1575 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1577 	  "I219 LM Ethernet Connection",
   1578 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1580 	  "I219 LM Ethernet Connection",
   1581 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1583 	  "I219 LM Ethernet Connection",
   1584 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1585 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1586 	  "I219 LM Ethernet Connection",
   1587 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1589 	  "I219 LM Ethernet Connection",
   1590 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1592 	  "I219 LM Ethernet Connection",
   1593 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1595 	  "I219 LM Ethernet Connection",
   1596 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1597 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1598 	  "I219 V Ethernet Connection",
   1599 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1601 	  "I219 V Ethernet Connection",
   1602 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1604 	  "I219 V Ethernet Connection",
   1605 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1607 	  "I219 V Ethernet Connection",
   1608 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1610 	  "I219 V Ethernet Connection",
   1611 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1613 	  "I219 V Ethernet Connection",
   1614 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1616 	  "I219 V Ethernet Connection",
   1617 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1619 	  "I219 V Ethernet Connection",
   1620 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1622 	  "I219 V Ethernet Connection",
   1623 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1625 	  "I219 V Ethernet Connection",
   1626 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1628 	  "I219 V Ethernet Connection",
   1629 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1631 	  "I219 V Ethernet Connection",
   1632 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1634 	  "I219 V Ethernet Connection",
   1635 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1636 	{ 0,			0,
   1637 	  NULL,
   1638 	  0,			0 },
   1639 };
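
         /*
          * The all-zero entry terminates wm_products: wm_lookup() scans
          * the table until it reaches the entry whose wmp_name is NULL.
          */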
   1640 
   1641 /*
   1642  * Register read/write functions.
   1643  * Other than CSR_{READ|WRITE}().
   1644  */
   1645 
   1646 #if 0 /* Not currently used */
   1647 static inline uint32_t
   1648 wm_io_read(struct wm_softc *sc, int reg)
   1649 {
   1650 
   1651 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1652 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1653 }
   1654 #endif
   1655 
   1656 static inline void
   1657 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1658 {
   1659 
   1660 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1661 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1662 }
   1663 
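         /*
          * wm_io_read()/wm_io_write() implement an IOADDR/IODATA style
          * indirect access window: the target register offset is written
          * at I/O offset 0 and the data is then read or written at I/O
          * offset 4.  A minimal usage sketch (illustrative only, not
          * compiled; WMREG_CTRL is just an example register):
          */
         #if 0
         	if (sc->sc_flags & WM_F_IOH_VALID)	/* I/O BAR was mapped */
         		wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
         #endif
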
   1664 static inline void
   1665 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1666     uint32_t data)
   1667 {
   1668 	uint32_t regval;
   1669 	int i;
   1670 
   1671 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1672 
   1673 	CSR_WRITE(sc, reg, regval);
   1674 
   1675 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1676 		delay(5);
   1677 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1678 			break;
   1679 	}
   1680 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1681 		aprint_error("%s: WARNING:"
   1682 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1683 		    device_xname(sc->sc_dev), reg);
   1684 	}
   1685 }
   1686 
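         /*
          * The helper above packs an 8-bit controller register offset and
          * its data value into a single write, then polls the ready bit
          * with delay(5) for up to SCTL_CTL_POLL_TIMEOUT iterations
          * (roughly 5 * SCTL_CTL_POLL_TIMEOUT microseconds in total)
          * before warning that the write did not complete.
          */
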
   1687 static inline void
   1688 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1689 {
   1690 	wa->wa_low = htole32(v & 0xffffffffU);
   1691 	if (sizeof(bus_addr_t) == 8)
   1692 		wa->wa_high = htole32((uint64_t) v >> 32);
   1693 	else
   1694 		wa->wa_high = 0;
   1695 }
   1696 
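         /*
          * Worked example for wm_set_dma_addr(): with a 64-bit bus_addr_t
          * and v = 0x0000000123456780, wa_low becomes htole32(0x23456780)
          * and wa_high becomes htole32(0x00000001).  With a 32-bit
          * bus_addr_t the shift branch is never taken and wa_high is 0.
          */
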
   1697 /*
   1698  * Descriptor sync/init functions.
   1699  */
   1700 static inline void
   1701 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1702 {
   1703 	struct wm_softc *sc = txq->txq_sc;
   1704 
   1705 	/* If it will wrap around, sync to the end of the ring. */
   1706 	if ((start + num) > WM_NTXDESC(txq)) {
   1707 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1708 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1709 		    (WM_NTXDESC(txq) - start), ops);
   1710 		num -= (WM_NTXDESC(txq) - start);
   1711 		start = 0;
   1712 	}
   1713 
   1714 	/* Now sync whatever is left. */
   1715 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1716 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1717 }
   1718 
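         /*
          * Worked example for wm_cdtxsync(): on a 256-descriptor ring, a
          * call with start = 250 and num = 10 wraps, so the first
          * bus_dmamap_sync() call covers descriptors 250..255 (six of
          * them) and the second covers the remaining four, 0..3.
          */
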
   1719 static inline void
   1720 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1721 {
   1722 	struct wm_softc *sc = rxq->rxq_sc;
   1723 
   1724 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1725 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1726 }
   1727 
   1728 static inline void
   1729 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1730 {
   1731 	struct wm_softc *sc = rxq->rxq_sc;
   1732 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1733 	struct mbuf *m = rxs->rxs_mbuf;
   1734 
   1735 	/*
   1736 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1737 	 * so that the payload after the Ethernet header is aligned
   1738 	 * to a 4-byte boundary.
    1739 	 *
   1740 	 * XXX BRAINDAMAGE ALERT!
   1741 	 * The stupid chip uses the same size for every buffer, which
   1742 	 * is set in the Receive Control register.  We are using the 2K
   1743 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1744 	 * reason, we can't "scoot" packets longer than the standard
   1745 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1746 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1747 	 * the upper layer copy the headers.
   1748 	 */
   1749 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1750 
   1751 	if (sc->sc_type == WM_T_82574) {
   1752 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1753 		rxd->erx_data.erxd_addr =
   1754 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1755 		rxd->erx_data.erxd_dd = 0;
   1756 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1757 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1758 
   1759 		rxd->nqrx_data.nrxd_paddr =
   1760 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1761 		/* Currently, split header is not supported. */
   1762 		rxd->nqrx_data.nrxd_haddr = 0;
   1763 	} else {
   1764 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1765 
   1766 		wm_set_dma_addr(&rxd->wrx_addr,
   1767 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1768 		rxd->wrx_len = 0;
   1769 		rxd->wrx_cksum = 0;
   1770 		rxd->wrx_status = 0;
   1771 		rxd->wrx_errors = 0;
   1772 		rxd->wrx_special = 0;
   1773 	}
   1774 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1775 
   1776 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1777 }
   1778 
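         /*
          * The align_tweak arithmetic above in concrete numbers: scooting
          * the 14-byte Ethernet header forward by 2 bytes puts the IP
          * header at offset 16, which is 4-byte aligned.  A standard
          * 1518-byte frame plus the 2-byte offset still fits in a 2K
          * buffer (1520 <= 2046), but frames larger than (2K - 2) bytes
          * force sc_align_tweak to 0 on strict-alignment platforms.
          */
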
   1779 /*
   1780  * Device driver interface functions and commonly used functions.
   1781  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1782  */
   1783 
   1784 /* Lookup supported device table */
   1785 static const struct wm_product *
   1786 wm_lookup(const struct pci_attach_args *pa)
   1787 {
   1788 	const struct wm_product *wmp;
   1789 
   1790 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1791 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1792 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1793 			return wmp;
   1794 	}
   1795 	return NULL;
   1796 }
   1797 
   1798 /* The match function (ca_match) */
   1799 static int
   1800 wm_match(device_t parent, cfdata_t cf, void *aux)
   1801 {
   1802 	struct pci_attach_args *pa = aux;
   1803 
   1804 	if (wm_lookup(pa) != NULL)
   1805 		return 1;
   1806 
   1807 	return 0;
   1808 }
   1809 
   1810 /* The attach function (ca_attach) */
   1811 static void
   1812 wm_attach(device_t parent, device_t self, void *aux)
   1813 {
   1814 	struct wm_softc *sc = device_private(self);
   1815 	struct pci_attach_args *pa = aux;
   1816 	prop_dictionary_t dict;
   1817 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1818 	pci_chipset_tag_t pc = pa->pa_pc;
   1819 	int counts[PCI_INTR_TYPE_SIZE];
   1820 	pci_intr_type_t max_type;
   1821 	const char *eetype, *xname;
   1822 	bus_space_tag_t memt;
   1823 	bus_space_handle_t memh;
   1824 	bus_size_t memsize;
   1825 	int memh_valid;
   1826 	int i, error;
   1827 	const struct wm_product *wmp;
   1828 	prop_data_t ea;
   1829 	prop_number_t pn;
   1830 	uint8_t enaddr[ETHER_ADDR_LEN];
   1831 	char buf[256];
   1832 	char wqname[MAXCOMLEN];
   1833 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1834 	pcireg_t preg, memtype;
   1835 	uint16_t eeprom_data, apme_mask;
   1836 	bool force_clear_smbi;
   1837 	uint32_t link_mode;
   1838 	uint32_t reg;
   1839 
   1840 	sc->sc_dev = self;
   1841 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1842 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1843 	sc->sc_core_stopping = false;
   1844 
   1845 	wmp = wm_lookup(pa);
   1846 #ifdef DIAGNOSTIC
   1847 	if (wmp == NULL) {
   1848 		printf("\n");
   1849 		panic("wm_attach: impossible");
   1850 	}
   1851 #endif
   1852 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1853 
   1854 	sc->sc_pc = pa->pa_pc;
   1855 	sc->sc_pcitag = pa->pa_tag;
   1856 
   1857 	if (pci_dma64_available(pa))
   1858 		sc->sc_dmat = pa->pa_dmat64;
   1859 	else
   1860 		sc->sc_dmat = pa->pa_dmat;
   1861 
   1862 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1863 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1864 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1865 
   1866 	sc->sc_type = wmp->wmp_type;
   1867 
   1868 	/* Set default function pointers */
   1869 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1870 	sc->phy.release = sc->nvm.release = wm_put_null;
   1871 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1872 
   1873 	if (sc->sc_type < WM_T_82543) {
   1874 		if (sc->sc_rev < 2) {
   1875 			aprint_error_dev(sc->sc_dev,
   1876 			    "i82542 must be at least rev. 2\n");
   1877 			return;
   1878 		}
   1879 		if (sc->sc_rev < 3)
   1880 			sc->sc_type = WM_T_82542_2_0;
   1881 	}
   1882 
   1883 	/*
   1884 	 * Disable MSI for Errata:
   1885 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1886 	 *
   1887 	 *  82544: Errata 25
   1888 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1889 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1890 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1891 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1892 	 *
   1893 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1894 	 *
   1895 	 *  82571 & 82572: Errata 63
   1896 	 */
   1897 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1898 	    || (sc->sc_type == WM_T_82572))
   1899 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1900 
   1901 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1902 	    || (sc->sc_type == WM_T_82580)
   1903 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1904 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1905 		sc->sc_flags |= WM_F_NEWQUEUE;
   1906 
   1907 	/* Set device properties (mactype) */
   1908 	dict = device_properties(sc->sc_dev);
   1909 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1910 
   1911 	/*
   1912 	 * Map the device.  All devices support memory-mapped acccess,
   1913 	 * and it is really required for normal operation.
    1914 	 * Map the device.  All devices support memory-mapped access,
   1915 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1916 	switch (memtype) {
   1917 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1918 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1919 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1920 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1921 		break;
   1922 	default:
   1923 		memh_valid = 0;
   1924 		break;
   1925 	}
   1926 
   1927 	if (memh_valid) {
   1928 		sc->sc_st = memt;
   1929 		sc->sc_sh = memh;
   1930 		sc->sc_ss = memsize;
   1931 	} else {
   1932 		aprint_error_dev(sc->sc_dev,
   1933 		    "unable to map device registers\n");
   1934 		return;
   1935 	}
   1936 
   1937 	/*
   1938 	 * In addition, i82544 and later support I/O mapped indirect
   1939 	 * register access.  It is not desirable (nor supported in
   1940 	 * this driver) to use it for normal operation, though it is
   1941 	 * required to work around bugs in some chip versions.
   1942 	 */
   1943 	if (sc->sc_type >= WM_T_82544) {
   1944 		/* First we have to find the I/O BAR. */
   1945 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1946 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1947 			if (memtype == PCI_MAPREG_TYPE_IO)
   1948 				break;
   1949 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1950 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1951 				i += 4;	/* skip high bits, too */
   1952 		}
   1953 		if (i < PCI_MAPREG_END) {
   1954 			/*
    1955 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1956 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1957 			 * That's not a problem, because newer chips don't
    1958 			 * have this bug.
    1959 			 *
    1960 			 * The i8254x apparently doesn't respond when the
    1961 			 * I/O BAR is 0, which looks as if it hasn't been
    1962 			 * configured.
   1963 			 */
   1964 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1965 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1966 				aprint_error_dev(sc->sc_dev,
   1967 				    "WARNING: I/O BAR at zero.\n");
   1968 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1969 					0, &sc->sc_iot, &sc->sc_ioh,
   1970 					NULL, &sc->sc_ios) == 0) {
   1971 				sc->sc_flags |= WM_F_IOH_VALID;
   1972 			} else
   1973 				aprint_error_dev(sc->sc_dev,
   1974 				    "WARNING: unable to map I/O space\n");
   1975 		}
   1976 
   1977 	}
   1978 
   1979 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1980 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1981 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1982 	if (sc->sc_type < WM_T_82542_2_1)
   1983 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1984 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1985 
   1986 	/* Power up chip */
   1987 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1988 	    && error != EOPNOTSUPP) {
   1989 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1990 		return;
   1991 	}
   1992 
   1993 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1994 	/*
    1995 	 * Don't use MSI-X if we can use only one queue; this saves an
    1996 	 * interrupt resource.
   1997 	 */
   1998 	if (sc->sc_nqueues > 1) {
   1999 		max_type = PCI_INTR_TYPE_MSIX;
   2000 		/*
    2001 		 * The 82583 has an MSI-X capability in its PCI configuration
    2002 		 * space, but it doesn't actually support MSI-X; at least the
    2003 		 * documentation doesn't say anything about it.
   2004 		 */
   2005 		counts[PCI_INTR_TYPE_MSIX]
   2006 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2007 	} else {
   2008 		max_type = PCI_INTR_TYPE_MSI;
   2009 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2010 	}
   2011 
   2012 	/* Allocation settings */
   2013 	counts[PCI_INTR_TYPE_MSI] = 1;
   2014 	counts[PCI_INTR_TYPE_INTX] = 1;
   2015 	/* overridden by disable flags */
   2016 	if (wm_disable_msi != 0) {
   2017 		counts[PCI_INTR_TYPE_MSI] = 0;
   2018 		if (wm_disable_msix != 0) {
   2019 			max_type = PCI_INTR_TYPE_INTX;
   2020 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2021 		}
   2022 	} else if (wm_disable_msix != 0) {
   2023 		max_type = PCI_INTR_TYPE_MSI;
   2024 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2025 	}
   2026 
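         	/*
         	 * Interrupt allocation with fallback: try the best type
         	 * allowed by max_type and counts[] (MSI-X when multiqueue),
         	 * and if its setup fails, release the vectors, drop max_type
         	 * one step (MSI-X -> MSI -> INTx) and retry from alloc_retry.
         	 */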
   2027 alloc_retry:
   2028 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2029 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2030 		return;
   2031 	}
   2032 
   2033 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2034 		error = wm_setup_msix(sc);
   2035 		if (error) {
   2036 			pci_intr_release(pc, sc->sc_intrs,
   2037 			    counts[PCI_INTR_TYPE_MSIX]);
   2038 
   2039 			/* Setup for MSI: Disable MSI-X */
   2040 			max_type = PCI_INTR_TYPE_MSI;
   2041 			counts[PCI_INTR_TYPE_MSI] = 1;
   2042 			counts[PCI_INTR_TYPE_INTX] = 1;
   2043 			goto alloc_retry;
   2044 		}
   2045 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2046 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2047 		error = wm_setup_legacy(sc);
   2048 		if (error) {
   2049 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2050 			    counts[PCI_INTR_TYPE_MSI]);
   2051 
   2052 			/* The next try is for INTx: Disable MSI */
   2053 			max_type = PCI_INTR_TYPE_INTX;
   2054 			counts[PCI_INTR_TYPE_INTX] = 1;
   2055 			goto alloc_retry;
   2056 		}
   2057 	} else {
   2058 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2059 		error = wm_setup_legacy(sc);
   2060 		if (error) {
   2061 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2062 			    counts[PCI_INTR_TYPE_INTX]);
   2063 			return;
   2064 		}
   2065 	}
   2066 
   2067 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2068 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2069 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2070 	    WM_WORKQUEUE_FLAGS);
   2071 	if (error) {
   2072 		aprint_error_dev(sc->sc_dev,
   2073 		    "unable to create workqueue\n");
   2074 		goto out;
   2075 	}
   2076 
   2077 	/*
   2078 	 * Check the function ID (unit number of the chip).
   2079 	 */
   2080 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2081 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2082 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2083 	    || (sc->sc_type == WM_T_82580)
   2084 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2085 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2086 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2087 	else
   2088 		sc->sc_funcid = 0;
   2089 
   2090 	/*
   2091 	 * Determine a few things about the bus we're connected to.
   2092 	 */
   2093 	if (sc->sc_type < WM_T_82543) {
   2094 		/* We don't really know the bus characteristics here. */
   2095 		sc->sc_bus_speed = 33;
   2096 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2097 		/*
    2098 		 * CSA (Communication Streaming Architecture) is about as
    2099 		 * fast as a 32-bit 66MHz PCI bus.
   2100 		 */
   2101 		sc->sc_flags |= WM_F_CSA;
   2102 		sc->sc_bus_speed = 66;
   2103 		aprint_verbose_dev(sc->sc_dev,
   2104 		    "Communication Streaming Architecture\n");
   2105 		if (sc->sc_type == WM_T_82547) {
   2106 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2107 			callout_setfunc(&sc->sc_txfifo_ch,
   2108 			    wm_82547_txfifo_stall, sc);
   2109 			aprint_verbose_dev(sc->sc_dev,
   2110 			    "using 82547 Tx FIFO stall work-around\n");
   2111 		}
   2112 	} else if (sc->sc_type >= WM_T_82571) {
   2113 		sc->sc_flags |= WM_F_PCIE;
   2114 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2115 		    && (sc->sc_type != WM_T_ICH10)
   2116 		    && (sc->sc_type != WM_T_PCH)
   2117 		    && (sc->sc_type != WM_T_PCH2)
   2118 		    && (sc->sc_type != WM_T_PCH_LPT)
   2119 		    && (sc->sc_type != WM_T_PCH_SPT)
   2120 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2121 			/* ICH* and PCH* have no PCIe capability registers */
   2122 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2123 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2124 				NULL) == 0)
   2125 				aprint_error_dev(sc->sc_dev,
   2126 				    "unable to find PCIe capability\n");
   2127 		}
   2128 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2129 	} else {
   2130 		reg = CSR_READ(sc, WMREG_STATUS);
   2131 		if (reg & STATUS_BUS64)
   2132 			sc->sc_flags |= WM_F_BUS64;
   2133 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2134 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2135 
   2136 			sc->sc_flags |= WM_F_PCIX;
   2137 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2138 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2139 				aprint_error_dev(sc->sc_dev,
   2140 				    "unable to find PCIX capability\n");
   2141 			else if (sc->sc_type != WM_T_82545_3 &&
   2142 				 sc->sc_type != WM_T_82546_3) {
   2143 				/*
   2144 				 * Work around a problem caused by the BIOS
   2145 				 * setting the max memory read byte count
   2146 				 * incorrectly.
   2147 				 */
   2148 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2149 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2150 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2151 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2152 
   2153 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2154 				    PCIX_CMD_BYTECNT_SHIFT;
   2155 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2156 				    PCIX_STATUS_MAXB_SHIFT;
   2157 				if (bytecnt > maxb) {
   2158 					aprint_verbose_dev(sc->sc_dev,
   2159 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2160 					    512 << bytecnt, 512 << maxb);
   2161 					pcix_cmd = (pcix_cmd &
   2162 					    ~PCIX_CMD_BYTECNT_MASK) |
   2163 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2164 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2165 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2166 					    pcix_cmd);
   2167 				}
   2168 			}
   2169 		}
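         		/*
         		 * The MMRBC fields above encode byte counts as 512 << n:
         		 * e.g. bytecnt = 3 and maxb = 1 would mean the BIOS
         		 * programmed a 4096-byte maximum read while the device
         		 * only allows 1024, so the command register is rewritten.
         		 */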
   2170 		/*
   2171 		 * The quad port adapter is special; it has a PCIX-PCIX
   2172 		 * bridge on the board, and can run the secondary bus at
   2173 		 * a higher speed.
   2174 		 */
   2175 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2176 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2177 								      : 66;
   2178 		} else if (sc->sc_flags & WM_F_PCIX) {
   2179 			switch (reg & STATUS_PCIXSPD_MASK) {
   2180 			case STATUS_PCIXSPD_50_66:
   2181 				sc->sc_bus_speed = 66;
   2182 				break;
   2183 			case STATUS_PCIXSPD_66_100:
   2184 				sc->sc_bus_speed = 100;
   2185 				break;
   2186 			case STATUS_PCIXSPD_100_133:
   2187 				sc->sc_bus_speed = 133;
   2188 				break;
   2189 			default:
   2190 				aprint_error_dev(sc->sc_dev,
   2191 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2192 				    reg & STATUS_PCIXSPD_MASK);
   2193 				sc->sc_bus_speed = 66;
   2194 				break;
   2195 			}
   2196 		} else
   2197 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2198 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2199 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2200 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2201 	}
   2202 
   2203 	/* clear interesting stat counters */
   2204 	CSR_READ(sc, WMREG_COLC);
   2205 	CSR_READ(sc, WMREG_RXERRC);
   2206 
   2207 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2208 	    || (sc->sc_type >= WM_T_ICH8))
   2209 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2210 	if (sc->sc_type >= WM_T_ICH8)
   2211 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2212 
   2213 	/* Set PHY, NVM mutex related stuff */
   2214 	switch (sc->sc_type) {
   2215 	case WM_T_82542_2_0:
   2216 	case WM_T_82542_2_1:
   2217 	case WM_T_82543:
   2218 	case WM_T_82544:
   2219 		/* Microwire */
   2220 		sc->nvm.read = wm_nvm_read_uwire;
   2221 		sc->sc_nvm_wordsize = 64;
   2222 		sc->sc_nvm_addrbits = 6;
   2223 		break;
   2224 	case WM_T_82540:
   2225 	case WM_T_82545:
   2226 	case WM_T_82545_3:
   2227 	case WM_T_82546:
   2228 	case WM_T_82546_3:
   2229 		/* Microwire */
   2230 		sc->nvm.read = wm_nvm_read_uwire;
   2231 		reg = CSR_READ(sc, WMREG_EECD);
   2232 		if (reg & EECD_EE_SIZE) {
   2233 			sc->sc_nvm_wordsize = 256;
   2234 			sc->sc_nvm_addrbits = 8;
   2235 		} else {
   2236 			sc->sc_nvm_wordsize = 64;
   2237 			sc->sc_nvm_addrbits = 6;
   2238 		}
   2239 		sc->sc_flags |= WM_F_LOCK_EECD;
   2240 		sc->nvm.acquire = wm_get_eecd;
   2241 		sc->nvm.release = wm_put_eecd;
   2242 		break;
   2243 	case WM_T_82541:
   2244 	case WM_T_82541_2:
   2245 	case WM_T_82547:
   2246 	case WM_T_82547_2:
   2247 		reg = CSR_READ(sc, WMREG_EECD);
   2248 		/*
    2249 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2250 		 * 8254[17], so set the flags and functions before calling it.
   2251 		 */
   2252 		sc->sc_flags |= WM_F_LOCK_EECD;
   2253 		sc->nvm.acquire = wm_get_eecd;
   2254 		sc->nvm.release = wm_put_eecd;
   2255 		if (reg & EECD_EE_TYPE) {
   2256 			/* SPI */
   2257 			sc->nvm.read = wm_nvm_read_spi;
   2258 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2259 			wm_nvm_set_addrbits_size_eecd(sc);
   2260 		} else {
   2261 			/* Microwire */
   2262 			sc->nvm.read = wm_nvm_read_uwire;
   2263 			if ((reg & EECD_EE_ABITS) != 0) {
   2264 				sc->sc_nvm_wordsize = 256;
   2265 				sc->sc_nvm_addrbits = 8;
   2266 			} else {
   2267 				sc->sc_nvm_wordsize = 64;
   2268 				sc->sc_nvm_addrbits = 6;
   2269 			}
   2270 		}
   2271 		break;
   2272 	case WM_T_82571:
   2273 	case WM_T_82572:
   2274 		/* SPI */
   2275 		sc->nvm.read = wm_nvm_read_eerd;
    2276 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2277 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2278 		wm_nvm_set_addrbits_size_eecd(sc);
   2279 		sc->phy.acquire = wm_get_swsm_semaphore;
   2280 		sc->phy.release = wm_put_swsm_semaphore;
   2281 		sc->nvm.acquire = wm_get_nvm_82571;
   2282 		sc->nvm.release = wm_put_nvm_82571;
   2283 		break;
   2284 	case WM_T_82573:
   2285 	case WM_T_82574:
   2286 	case WM_T_82583:
   2287 		sc->nvm.read = wm_nvm_read_eerd;
    2288 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2289 		if (sc->sc_type == WM_T_82573) {
   2290 			sc->phy.acquire = wm_get_swsm_semaphore;
   2291 			sc->phy.release = wm_put_swsm_semaphore;
   2292 			sc->nvm.acquire = wm_get_nvm_82571;
   2293 			sc->nvm.release = wm_put_nvm_82571;
   2294 		} else {
   2295 			/* Both PHY and NVM use the same semaphore. */
   2296 			sc->phy.acquire = sc->nvm.acquire
   2297 			    = wm_get_swfwhw_semaphore;
   2298 			sc->phy.release = sc->nvm.release
   2299 			    = wm_put_swfwhw_semaphore;
   2300 		}
   2301 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2302 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2303 			sc->sc_nvm_wordsize = 2048;
   2304 		} else {
   2305 			/* SPI */
   2306 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2307 			wm_nvm_set_addrbits_size_eecd(sc);
   2308 		}
   2309 		break;
   2310 	case WM_T_82575:
   2311 	case WM_T_82576:
   2312 	case WM_T_82580:
   2313 	case WM_T_I350:
   2314 	case WM_T_I354:
   2315 	case WM_T_80003:
   2316 		/* SPI */
   2317 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2318 		wm_nvm_set_addrbits_size_eecd(sc);
   2319 		if ((sc->sc_type == WM_T_80003)
   2320 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2321 			sc->nvm.read = wm_nvm_read_eerd;
   2322 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2323 		} else {
   2324 			sc->nvm.read = wm_nvm_read_spi;
   2325 			sc->sc_flags |= WM_F_LOCK_EECD;
   2326 		}
   2327 		sc->phy.acquire = wm_get_phy_82575;
   2328 		sc->phy.release = wm_put_phy_82575;
   2329 		sc->nvm.acquire = wm_get_nvm_80003;
   2330 		sc->nvm.release = wm_put_nvm_80003;
   2331 		break;
   2332 	case WM_T_ICH8:
   2333 	case WM_T_ICH9:
   2334 	case WM_T_ICH10:
   2335 	case WM_T_PCH:
   2336 	case WM_T_PCH2:
   2337 	case WM_T_PCH_LPT:
   2338 		sc->nvm.read = wm_nvm_read_ich8;
   2339 		/* FLASH */
   2340 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2341 		sc->sc_nvm_wordsize = 2048;
   2342 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2343 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2344 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2345 			aprint_error_dev(sc->sc_dev,
   2346 			    "can't map FLASH registers\n");
   2347 			goto out;
   2348 		}
   2349 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2350 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2351 		    ICH_FLASH_SECTOR_SIZE;
   2352 		sc->sc_ich8_flash_bank_size =
   2353 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2354 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2355 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2356 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
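         		/*
         		 * Worked example of the sizing above, assuming
         		 * ICH_FLASH_SECTOR_SIZE is 4 KB: a GFPREG of 0x00070000
         		 * describes sectors 0..7, so the 8-sector (32 KB)
         		 * region, split into two banks of 2-byte words, holds
         		 * 32768 / (2 * 2) = 8192 words per bank.
         		 */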
   2357 		sc->sc_flashreg_offset = 0;
   2358 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2359 		sc->phy.release = wm_put_swflag_ich8lan;
   2360 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2361 		sc->nvm.release = wm_put_nvm_ich8lan;
   2362 		break;
   2363 	case WM_T_PCH_SPT:
   2364 	case WM_T_PCH_CNP:
   2365 		sc->nvm.read = wm_nvm_read_spt;
   2366 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2367 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2368 		sc->sc_flasht = sc->sc_st;
   2369 		sc->sc_flashh = sc->sc_sh;
   2370 		sc->sc_ich8_flash_base = 0;
   2371 		sc->sc_nvm_wordsize =
   2372 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2373 		    * NVM_SIZE_MULTIPLIER;
    2374 		/* That's the size in bytes; we want it in words */
   2375 		sc->sc_nvm_wordsize /= 2;
   2376 		/* Assume 2 banks */
   2377 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
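         		/*
         		 * For example, assuming NVM_SIZE_MULTIPLIER is 4096: a
         		 * strap field of 7 gives (7 + 1) * 4096 = 32768 bytes,
         		 * i.e. 16384 words, and an assumed per-bank size of
         		 * 8192 words.
         		 */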
   2378 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2379 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2380 		sc->phy.release = wm_put_swflag_ich8lan;
   2381 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2382 		sc->nvm.release = wm_put_nvm_ich8lan;
   2383 		break;
   2384 	case WM_T_I210:
   2385 	case WM_T_I211:
    2386 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2387 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2388 		if (wm_nvm_flash_presence_i210(sc)) {
   2389 			sc->nvm.read = wm_nvm_read_eerd;
   2390 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2391 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2392 			wm_nvm_set_addrbits_size_eecd(sc);
   2393 		} else {
   2394 			sc->nvm.read = wm_nvm_read_invm;
   2395 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2396 			sc->sc_nvm_wordsize = INVM_SIZE;
   2397 		}
   2398 		sc->phy.acquire = wm_get_phy_82575;
   2399 		sc->phy.release = wm_put_phy_82575;
   2400 		sc->nvm.acquire = wm_get_nvm_80003;
   2401 		sc->nvm.release = wm_put_nvm_80003;
   2402 		break;
   2403 	default:
   2404 		break;
   2405 	}
   2406 
   2407 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2408 	switch (sc->sc_type) {
   2409 	case WM_T_82571:
   2410 	case WM_T_82572:
   2411 		reg = CSR_READ(sc, WMREG_SWSM2);
   2412 		if ((reg & SWSM2_LOCK) == 0) {
   2413 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2414 			force_clear_smbi = true;
   2415 		} else
   2416 			force_clear_smbi = false;
   2417 		break;
   2418 	case WM_T_82573:
   2419 	case WM_T_82574:
   2420 	case WM_T_82583:
   2421 		force_clear_smbi = true;
   2422 		break;
   2423 	default:
   2424 		force_clear_smbi = false;
   2425 		break;
   2426 	}
   2427 	if (force_clear_smbi) {
   2428 		reg = CSR_READ(sc, WMREG_SWSM);
   2429 		if ((reg & SWSM_SMBI) != 0)
   2430 			aprint_error_dev(sc->sc_dev,
   2431 			    "Please update the Bootagent\n");
   2432 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2433 	}
   2434 
   2435 	/*
    2436 	 * Defer printing the EEPROM type until after verifying the checksum.
   2437 	 * This allows the EEPROM type to be printed correctly in the case
   2438 	 * that no EEPROM is attached.
   2439 	 */
   2440 	/*
   2441 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2442 	 * this for later, so we can fail future reads from the EEPROM.
   2443 	 */
   2444 	if (wm_nvm_validate_checksum(sc)) {
   2445 		/*
    2446 		 * Validate again, because some PCI-e parts fail the
    2447 		 * first check due to the link being in a sleep state.
   2448 		 */
   2449 		if (wm_nvm_validate_checksum(sc))
   2450 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2451 	}
   2452 
   2453 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2454 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2455 	else {
   2456 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2457 		    sc->sc_nvm_wordsize);
   2458 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2459 			aprint_verbose("iNVM");
   2460 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2461 			aprint_verbose("FLASH(HW)");
   2462 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2463 			aprint_verbose("FLASH");
   2464 		else {
   2465 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2466 				eetype = "SPI";
   2467 			else
   2468 				eetype = "MicroWire";
   2469 			aprint_verbose("(%d address bits) %s EEPROM",
   2470 			    sc->sc_nvm_addrbits, eetype);
   2471 		}
   2472 	}
   2473 	wm_nvm_version(sc);
   2474 	aprint_verbose("\n");
   2475 
   2476 	/*
    2477 	 * XXX The first call to wm_gmii_setup_phytype. The result might
    2478 	 * be incorrect.
   2479 	 */
   2480 	wm_gmii_setup_phytype(sc, 0, 0);
   2481 
   2482 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2483 	switch (sc->sc_type) {
   2484 	case WM_T_ICH8:
   2485 	case WM_T_ICH9:
   2486 	case WM_T_ICH10:
   2487 	case WM_T_PCH:
   2488 	case WM_T_PCH2:
   2489 	case WM_T_PCH_LPT:
   2490 	case WM_T_PCH_SPT:
   2491 	case WM_T_PCH_CNP:
   2492 		apme_mask = WUC_APME;
   2493 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2494 		if ((eeprom_data & apme_mask) != 0)
   2495 			sc->sc_flags |= WM_F_WOL;
   2496 		break;
   2497 	default:
   2498 		break;
   2499 	}
   2500 
   2501 	/* Reset the chip to a known state. */
   2502 	wm_reset(sc);
   2503 
   2504 	/*
   2505 	 * Check for I21[01] PLL workaround.
   2506 	 *
   2507 	 * Three cases:
   2508 	 * a) Chip is I211.
   2509 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2510 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2511 	 */
   2512 	if (sc->sc_type == WM_T_I211)
   2513 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2514 	if (sc->sc_type == WM_T_I210) {
   2515 		if (!wm_nvm_flash_presence_i210(sc))
   2516 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2517 		else if ((sc->sc_nvm_ver_major < 3)
   2518 		    || ((sc->sc_nvm_ver_major == 3)
   2519 			&& (sc->sc_nvm_ver_minor < 25))) {
   2520 			aprint_verbose_dev(sc->sc_dev,
   2521 			    "ROM image version %d.%d is older than 3.25\n",
   2522 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2523 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2524 		}
   2525 	}
   2526 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2527 		wm_pll_workaround_i210(sc);
   2528 
   2529 	wm_get_wakeup(sc);
   2530 
   2531 	/* Non-AMT based hardware can now take control from firmware */
   2532 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2533 		wm_get_hw_control(sc);
   2534 
   2535 	/*
   2536 	 * Read the Ethernet address from the EEPROM, if not first found
   2537 	 * in device properties.
   2538 	 */
   2539 	ea = prop_dictionary_get(dict, "mac-address");
   2540 	if (ea != NULL) {
   2541 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2542 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2543 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2544 	} else {
   2545 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2546 			aprint_error_dev(sc->sc_dev,
   2547 			    "unable to read Ethernet address\n");
   2548 			goto out;
   2549 		}
   2550 	}
   2551 
   2552 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2553 	    ether_sprintf(enaddr));
   2554 
   2555 	/*
   2556 	 * Read the config info from the EEPROM, and set up various
   2557 	 * bits in the control registers based on their contents.
   2558 	 */
   2559 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2560 	if (pn != NULL) {
   2561 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2562 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2563 	} else {
   2564 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2565 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2566 			goto out;
   2567 		}
   2568 	}
   2569 
   2570 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2571 	if (pn != NULL) {
   2572 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2573 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2574 	} else {
   2575 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2576 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2577 			goto out;
   2578 		}
   2579 	}
   2580 
   2581 	/* check for WM_F_WOL */
   2582 	switch (sc->sc_type) {
   2583 	case WM_T_82542_2_0:
   2584 	case WM_T_82542_2_1:
   2585 	case WM_T_82543:
   2586 		/* dummy? */
   2587 		eeprom_data = 0;
   2588 		apme_mask = NVM_CFG3_APME;
   2589 		break;
   2590 	case WM_T_82544:
   2591 		apme_mask = NVM_CFG2_82544_APM_EN;
   2592 		eeprom_data = cfg2;
   2593 		break;
   2594 	case WM_T_82546:
   2595 	case WM_T_82546_3:
   2596 	case WM_T_82571:
   2597 	case WM_T_82572:
   2598 	case WM_T_82573:
   2599 	case WM_T_82574:
   2600 	case WM_T_82583:
   2601 	case WM_T_80003:
   2602 	case WM_T_82575:
   2603 	case WM_T_82576:
   2604 		apme_mask = NVM_CFG3_APME;
   2605 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2606 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2607 		break;
   2608 	case WM_T_82580:
   2609 	case WM_T_I350:
   2610 	case WM_T_I354:
   2611 	case WM_T_I210:
   2612 	case WM_T_I211:
   2613 		apme_mask = NVM_CFG3_APME;
   2614 		wm_nvm_read(sc,
   2615 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2616 		    1, &eeprom_data);
   2617 		break;
   2618 	case WM_T_ICH8:
   2619 	case WM_T_ICH9:
   2620 	case WM_T_ICH10:
   2621 	case WM_T_PCH:
   2622 	case WM_T_PCH2:
   2623 	case WM_T_PCH_LPT:
   2624 	case WM_T_PCH_SPT:
   2625 	case WM_T_PCH_CNP:
    2626 		/* Already checked before wm_reset() */
   2627 		apme_mask = eeprom_data = 0;
   2628 		break;
   2629 	default: /* XXX 82540 */
   2630 		apme_mask = NVM_CFG3_APME;
   2631 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2632 		break;
   2633 	}
    2634 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2635 	if ((eeprom_data & apme_mask) != 0)
   2636 		sc->sc_flags |= WM_F_WOL;
   2637 
   2638 	/*
    2639 	 * We have the EEPROM settings; now apply the special cases
    2640 	 * where the EEPROM may be wrong or the board won't support
    2641 	 * wake-on-LAN on a particular port.
   2642 	 */
   2643 	switch (sc->sc_pcidevid) {
   2644 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2645 		sc->sc_flags &= ~WM_F_WOL;
   2646 		break;
   2647 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2648 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2649 		/* Wake events are only supported on port A for dual-fiber
    2650 		 * cards, regardless of the EEPROM setting. */
   2651 		if (sc->sc_funcid == 1)
   2652 			sc->sc_flags &= ~WM_F_WOL;
   2653 		break;
   2654 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2655 		/* If quad port adapter, disable WoL on all but port A */
   2656 		if (sc->sc_funcid != 0)
   2657 			sc->sc_flags &= ~WM_F_WOL;
   2658 		break;
   2659 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2660 		/* Wake events are only supported on port A for dual-fiber
    2661 		 * cards, regardless of the EEPROM setting. */
   2662 		if (sc->sc_funcid == 1)
   2663 			sc->sc_flags &= ~WM_F_WOL;
   2664 		break;
   2665 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2666 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2667 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2668 		/* If quad port adapter, disable WoL on all but port A */
   2669 		if (sc->sc_funcid != 0)
   2670 			sc->sc_flags &= ~WM_F_WOL;
   2671 		break;
   2672 	}
   2673 
   2674 	if (sc->sc_type >= WM_T_82575) {
   2675 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2676 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2677 			    nvmword);
   2678 			if ((sc->sc_type == WM_T_82575) ||
   2679 			    (sc->sc_type == WM_T_82576)) {
   2680 				/* Check NVM for autonegotiation */
   2681 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2682 				    != 0)
   2683 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2684 			}
   2685 			if ((sc->sc_type == WM_T_82575) ||
   2686 			    (sc->sc_type == WM_T_I350)) {
   2687 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2688 					sc->sc_flags |= WM_F_MAS;
   2689 			}
   2690 		}
   2691 	}
   2692 
   2693 	/*
    2694 	 * XXX need special handling for some multiple-port cards
    2695 	 * to disable a particular port.
   2696 	 */
   2697 
   2698 	if (sc->sc_type >= WM_T_82544) {
   2699 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2700 		if (pn != NULL) {
   2701 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2702 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2703 		} else {
   2704 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2705 				aprint_error_dev(sc->sc_dev,
   2706 				    "unable to read SWDPIN\n");
   2707 				goto out;
   2708 			}
   2709 		}
   2710 	}
   2711 
   2712 	if (cfg1 & NVM_CFG1_ILOS)
   2713 		sc->sc_ctrl |= CTRL_ILOS;
   2714 
   2715 	/*
   2716 	 * XXX
    2717 	 * This code isn't correct because pins 2 and 3 are located
    2718 	 * in different positions on newer chips. Check all datasheets.
    2719 	 *
    2720 	 * Until this problem is resolved, only do this on 82580 or older.
   2721 	 */
   2722 	if (sc->sc_type <= WM_T_82580) {
   2723 		if (sc->sc_type >= WM_T_82544) {
   2724 			sc->sc_ctrl |=
   2725 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2726 			    CTRL_SWDPIO_SHIFT;
   2727 			sc->sc_ctrl |=
   2728 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2729 			    CTRL_SWDPINS_SHIFT;
   2730 		} else {
   2731 			sc->sc_ctrl |=
   2732 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2733 			    CTRL_SWDPIO_SHIFT;
   2734 		}
   2735 	}
   2736 
   2737 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2738 		wm_nvm_read(sc,
   2739 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2740 		    1, &nvmword);
   2741 		if (nvmword & NVM_CFG3_ILOS)
   2742 			sc->sc_ctrl |= CTRL_ILOS;
   2743 	}
   2744 
   2745 #if 0
   2746 	if (sc->sc_type >= WM_T_82544) {
   2747 		if (cfg1 & NVM_CFG1_IPS0)
   2748 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2749 		if (cfg1 & NVM_CFG1_IPS1)
   2750 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2751 		sc->sc_ctrl_ext |=
   2752 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2753 		    CTRL_EXT_SWDPIO_SHIFT;
   2754 		sc->sc_ctrl_ext |=
   2755 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2756 		    CTRL_EXT_SWDPINS_SHIFT;
   2757 	} else {
   2758 		sc->sc_ctrl_ext |=
   2759 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2760 		    CTRL_EXT_SWDPIO_SHIFT;
   2761 	}
   2762 #endif
   2763 
   2764 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2765 #if 0
   2766 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2767 #endif
   2768 
   2769 	if (sc->sc_type == WM_T_PCH) {
   2770 		uint16_t val;
   2771 
   2772 		/* Save the NVM K1 bit setting */
   2773 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2774 
   2775 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2776 			sc->sc_nvm_k1_enabled = 1;
   2777 		else
   2778 			sc->sc_nvm_k1_enabled = 0;
   2779 	}
   2780 
    2781 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2782 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2783 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2784 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2785 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2786 	    || sc->sc_type == WM_T_82573
   2787 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2788 		/* Copper only */
   2789 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2790 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2791 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2792 	    || (sc->sc_type == WM_T_I211)) {
   2793 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2794 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2795 		switch (link_mode) {
   2796 		case CTRL_EXT_LINK_MODE_1000KX:
   2797 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2798 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2799 			break;
   2800 		case CTRL_EXT_LINK_MODE_SGMII:
   2801 			if (wm_sgmii_uses_mdio(sc)) {
   2802 				aprint_normal_dev(sc->sc_dev,
   2803 				    "SGMII(MDIO)\n");
   2804 				sc->sc_flags |= WM_F_SGMII;
   2805 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2806 				break;
   2807 			}
   2808 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2809 			/*FALLTHROUGH*/
   2810 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2811 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2812 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2813 				if (link_mode
   2814 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2815 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2816 					sc->sc_flags |= WM_F_SGMII;
   2817 					aprint_verbose_dev(sc->sc_dev,
   2818 					    "SGMII\n");
   2819 				} else {
   2820 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2821 					aprint_verbose_dev(sc->sc_dev,
   2822 					    "SERDES\n");
   2823 				}
   2824 				break;
   2825 			}
   2826 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2827 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2828 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2829 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2830 				sc->sc_flags |= WM_F_SGMII;
   2831 			}
   2832 			/* Do not change link mode for 100BaseFX */
   2833 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2834 				break;
   2835 
   2836 			/* Change current link mode setting */
   2837 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2838 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2839 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2840 			else
   2841 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2842 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2843 			break;
   2844 		case CTRL_EXT_LINK_MODE_GMII:
   2845 		default:
   2846 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2847 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2848 			break;
   2849 		}
   2850 
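         		/*
         		 * Summary of the link-mode mapping above (a sketch of
         		 * this switch): 1000KX -> SERDES; SGMII -> copper via
         		 * MDIO or I2C/SFP; PCIE_SERDES -> SERDES or copper
         		 * depending on the SFP module; GMII or anything else
         		 * -> copper.
         		 */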
    2852 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2853 			reg |= CTRL_EXT_I2C_ENA;
    2854 		else
    2855 			reg &= ~CTRL_EXT_I2C_ENA;
   2856 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2857 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2858 			wm_gmii_setup_phytype(sc, 0, 0);
   2859 			wm_reset_mdicnfg_82580(sc);
   2860 		}
   2861 	} else if (sc->sc_type < WM_T_82543 ||
   2862 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2863 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2864 			aprint_error_dev(sc->sc_dev,
   2865 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2866 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2867 		}
   2868 	} else {
   2869 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2870 			aprint_error_dev(sc->sc_dev,
   2871 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2872 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2873 		}
   2874 	}
   2875 
   2876 	if (sc->sc_type >= WM_T_PCH2)
   2877 		sc->sc_flags |= WM_F_EEE;
   2878 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2879 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2880 		/* XXX: Need special handling for I354. (not yet) */
   2881 		if (sc->sc_type != WM_T_I354)
   2882 			sc->sc_flags |= WM_F_EEE;
   2883 	}
   2884 
   2885 	/* Set device properties (macflags) */
   2886 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2887 
   2888 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2889 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2890 
   2891 #ifdef WM_MPSAFE
   2892 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2893 #else
   2894 	sc->sc_core_lock = NULL;
   2895 #endif
   2896 
   2897 	/* Initialize the media structures accordingly. */
   2898 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2899 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2900 	else
   2901 		wm_tbi_mediainit(sc); /* All others */
   2902 
   2903 	ifp = &sc->sc_ethercom.ec_if;
   2904 	xname = device_xname(sc->sc_dev);
   2905 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2906 	ifp->if_softc = sc;
   2907 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2908 #ifdef WM_MPSAFE
   2909 	ifp->if_extflags = IFEF_MPSAFE;
   2910 #endif
   2911 	ifp->if_ioctl = wm_ioctl;
   2912 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2913 		ifp->if_start = wm_nq_start;
   2914 		/*
   2915 		 * When the number of CPUs is one and the controller can use
    2916 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2917 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2918 		 * the other for link status changes.
   2919 		 * In this situation, wm_nq_transmit() is disadvantageous
   2920 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2921 		 */
   2922 		if (wm_is_using_multiqueue(sc))
   2923 			ifp->if_transmit = wm_nq_transmit;
   2924 	} else {
   2925 		ifp->if_start = wm_start;
   2926 		/*
    2927 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2928 		 */
   2929 		if (wm_is_using_multiqueue(sc))
   2930 			ifp->if_transmit = wm_transmit;
   2931 	}
    2932 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   2933 	ifp->if_init = wm_init;
   2934 	ifp->if_stop = wm_stop;
   2935 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2936 	IFQ_SET_READY(&ifp->if_snd);
   2937 
   2938 	/* Check for jumbo frame */
   2939 	switch (sc->sc_type) {
   2940 	case WM_T_82573:
   2941 		/* XXX limited to 9234 if ASPM is disabled */
   2942 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2943 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2944 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2945 		break;
   2946 	case WM_T_82571:
   2947 	case WM_T_82572:
   2948 	case WM_T_82574:
   2949 	case WM_T_82583:
   2950 	case WM_T_82575:
   2951 	case WM_T_82576:
   2952 	case WM_T_82580:
   2953 	case WM_T_I350:
   2954 	case WM_T_I354:
   2955 	case WM_T_I210:
   2956 	case WM_T_I211:
   2957 	case WM_T_80003:
   2958 	case WM_T_ICH9:
   2959 	case WM_T_ICH10:
   2960 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2961 	case WM_T_PCH_LPT:
   2962 	case WM_T_PCH_SPT:
   2963 	case WM_T_PCH_CNP:
   2964 		/* XXX limited to 9234 */
   2965 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2966 		break;
   2967 	case WM_T_PCH:
   2968 		/* XXX limited to 4096 */
   2969 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2970 		break;
   2971 	case WM_T_82542_2_0:
   2972 	case WM_T_82542_2_1:
   2973 	case WM_T_ICH8:
   2974 		/* No support for jumbo frame */
   2975 		break;
   2976 	default:
   2977 		/* ETHER_MAX_LEN_JUMBO */
   2978 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2979 		break;
   2980 	}
   2981 
    2982 	/* If we're an i82543 or greater, we can support VLANs. */
   2983 	if (sc->sc_type >= WM_T_82543) {
   2984 		sc->sc_ethercom.ec_capabilities |=
   2985 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2986 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2987 	}
   2988 
   2989 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2990 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2991 
   2992 	/*
    2993 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2994 	 * on i82543 and later.
   2995 	 */
   2996 	if (sc->sc_type >= WM_T_82543) {
   2997 		ifp->if_capabilities |=
   2998 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2999 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3000 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3001 		    IFCAP_CSUM_TCPv6_Tx |
   3002 		    IFCAP_CSUM_UDPv6_Tx;
   3003 	}
   3004 
   3005 	/*
    3006 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3007 	 *
   3008 	 *	82541GI (8086:1076) ... no
   3009 	 *	82572EI (8086:10b9) ... yes
   3010 	 */
   3011 	if (sc->sc_type >= WM_T_82571) {
   3012 		ifp->if_capabilities |=
   3013 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3014 	}
   3015 
   3016 	/*
    3017 	 * If we're an i82544 or greater (except i82547), we can do
   3018 	 * TCP segmentation offload.
   3019 	 */
   3020 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3021 		ifp->if_capabilities |= IFCAP_TSOv4;
   3022 	}
   3023 
   3024 	if (sc->sc_type >= WM_T_82571) {
   3025 		ifp->if_capabilities |= IFCAP_TSOv6;
   3026 	}
   3027 
   3028 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3029 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3030 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3031 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3032 
   3033 	/* Attach the interface. */
   3034 	error = if_initialize(ifp);
   3035 	if (error != 0) {
   3036 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3037 		    error);
   3038 		return; /* Error */
   3039 	}
   3040 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3041 	ether_ifattach(ifp, enaddr);
   3042 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3043 	if_register(ifp);
   3044 
   3045 #ifdef WM_EVENT_COUNTERS
   3046 	/* Attach event counters. */
   3047 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3048 	    NULL, xname, "linkintr");
   3049 
   3050 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3051 	    NULL, xname, "tx_xoff");
   3052 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3053 	    NULL, xname, "tx_xon");
   3054 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3055 	    NULL, xname, "rx_xoff");
   3056 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3057 	    NULL, xname, "rx_xon");
   3058 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3059 	    NULL, xname, "rx_macctl");
   3060 #endif /* WM_EVENT_COUNTERS */
   3061 
   3062 	sc->sc_txrx_use_workqueue = false;
   3063 
   3064 	wm_init_sysctls(sc);
   3065 
   3066 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3067 		pmf_class_network_register(self, ifp);
   3068 	else
   3069 		aprint_error_dev(self, "couldn't establish power handler\n");
   3070 
   3071 	sc->sc_flags |= WM_F_ATTACHED;
   3072 out:
   3073 	return;
   3074 }
   3075 
   3076 /* The detach function (ca_detach) */
   3077 static int
   3078 wm_detach(device_t self, int flags __unused)
   3079 {
   3080 	struct wm_softc *sc = device_private(self);
   3081 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3082 	int i;
   3083 
   3084 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3085 		return 0;
   3086 
    3087 	/* Stop the interface; callouts are stopped in it. */
   3088 	wm_stop(ifp, 1);
   3089 
   3090 	pmf_device_deregister(self);
   3091 
   3092 	sysctl_teardown(&sc->sc_sysctllog);
   3093 
   3094 #ifdef WM_EVENT_COUNTERS
   3095 	evcnt_detach(&sc->sc_ev_linkintr);
   3096 
   3097 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3098 	evcnt_detach(&sc->sc_ev_tx_xon);
   3099 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3100 	evcnt_detach(&sc->sc_ev_rx_xon);
   3101 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3102 #endif /* WM_EVENT_COUNTERS */
   3103 
   3104 	/* Tell the firmware about the release */
   3105 	WM_CORE_LOCK(sc);
   3106 	wm_release_manageability(sc);
   3107 	wm_release_hw_control(sc);
   3108 	wm_enable_wakeup(sc);
   3109 	WM_CORE_UNLOCK(sc);
   3110 
   3111 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3112 
   3113 	ether_ifdetach(ifp);
   3114 	if_detach(ifp);
   3115 	if_percpuq_destroy(sc->sc_ipq);
   3116 
   3117 	/* Delete all remaining media. */
   3118 	ifmedia_fini(&sc->sc_mii.mii_media);
   3119 
   3120 	/* Unload RX dmamaps and free mbufs */
   3121 	for (i = 0; i < sc->sc_nqueues; i++) {
   3122 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3123 		mutex_enter(rxq->rxq_lock);
   3124 		wm_rxdrain(rxq);
   3125 		mutex_exit(rxq->rxq_lock);
   3126 	}
   3127 	/* Must unlock here */
   3128 
   3129 	/* Disestablish the interrupt handler */
   3130 	for (i = 0; i < sc->sc_nintrs; i++) {
   3131 		if (sc->sc_ihs[i] != NULL) {
   3132 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3133 			sc->sc_ihs[i] = NULL;
   3134 		}
   3135 	}
   3136 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3137 
    3138 	/* wm_stop() ensures the workqueue is stopped. */
   3139 	workqueue_destroy(sc->sc_queue_wq);
   3140 
   3141 	for (i = 0; i < sc->sc_nqueues; i++)
   3142 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3143 
   3144 	wm_free_txrx_queues(sc);
   3145 
   3146 	/* Unmap the registers */
   3147 	if (sc->sc_ss) {
   3148 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3149 		sc->sc_ss = 0;
   3150 	}
   3151 	if (sc->sc_ios) {
   3152 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3153 		sc->sc_ios = 0;
   3154 	}
   3155 	if (sc->sc_flashs) {
   3156 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3157 		sc->sc_flashs = 0;
   3158 	}
   3159 
   3160 	if (sc->sc_core_lock)
   3161 		mutex_obj_free(sc->sc_core_lock);
   3162 	if (sc->sc_ich_phymtx)
   3163 		mutex_obj_free(sc->sc_ich_phymtx);
   3164 	if (sc->sc_ich_nvmmtx)
   3165 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3166 
   3167 	return 0;
   3168 }
   3169 
   3170 static bool
   3171 wm_suspend(device_t self, const pmf_qual_t *qual)
   3172 {
   3173 	struct wm_softc *sc = device_private(self);
   3174 
   3175 	wm_release_manageability(sc);
   3176 	wm_release_hw_control(sc);
   3177 	wm_enable_wakeup(sc);
   3178 
   3179 	return true;
   3180 }
   3181 
   3182 static bool
   3183 wm_resume(device_t self, const pmf_qual_t *qual)
   3184 {
   3185 	struct wm_softc *sc = device_private(self);
   3186 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3187 	pcireg_t reg;
   3188 	char buf[256];
   3189 
   3190 	reg = CSR_READ(sc, WMREG_WUS);
   3191 	if (reg != 0) {
   3192 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3193 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3194 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3195 	}
   3196 
   3197 	if (sc->sc_type >= WM_T_PCH2)
   3198 		wm_resume_workarounds_pchlan(sc);
   3199 	if ((ifp->if_flags & IFF_UP) == 0) {
   3200 		wm_reset(sc);
   3201 		/* Non-AMT based hardware can now take control from firmware */
   3202 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3203 			wm_get_hw_control(sc);
   3204 		wm_init_manageability(sc);
   3205 	} else {
   3206 		/*
   3207 		 * We called pmf_class_network_register(), so if_init() is
   3208 		 * automatically called when IFF_UP. wm_reset(),
   3209 		 * wm_get_hw_control() and wm_init_manageability() are called
   3210 		 * via wm_init().
   3211 		 */
   3212 	}
   3213 
   3214 	return true;
   3215 }
   3216 
   3217 /*
   3218  * wm_watchdog:		[ifnet interface function]
   3219  *
   3220  *	Watchdog timer handler.
   3221  */
   3222 static void
   3223 wm_watchdog(struct ifnet *ifp)
   3224 {
   3225 	int qid;
   3226 	struct wm_softc *sc = ifp->if_softc;
    3227 	uint16_t hang_queue = 0; /* Bitmap; wm(4)'s max queue count is the 82576's 16. */
   3228 
   3229 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3230 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3231 
   3232 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3233 	}
   3234 
    3235 	/* If any of the queues hung up, reset the interface. */
   3236 	if (hang_queue != 0) {
   3237 		(void)wm_init(ifp);
   3238 
   3239 		/*
    3240 		 * There is still some upper-layer processing that calls
    3241 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3242 		 */
   3243 		/* Try to get more packets going. */
   3244 		ifp->if_start(ifp);
   3245 	}
   3246 }
   3247 
   3248 
   3249 static void
   3250 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3251 {
   3252 
   3253 	mutex_enter(txq->txq_lock);
   3254 	if (txq->txq_sending &&
   3255 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3256 		wm_watchdog_txq_locked(ifp, txq, hang);
   3257 
   3258 	mutex_exit(txq->txq_lock);
   3259 }
   3260 
   3261 static void
   3262 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3263     uint16_t *hang)
   3264 {
   3265 	struct wm_softc *sc = ifp->if_softc;
   3266 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3267 
   3268 	KASSERT(mutex_owned(txq->txq_lock));
   3269 
   3270 	/*
   3271 	 * Since we're using delayed interrupts, sweep up
   3272 	 * before we report an error.
   3273 	 */
   3274 	wm_txeof(txq, UINT_MAX);
   3275 
   3276 	if (txq->txq_sending)
   3277 		*hang |= __BIT(wmq->wmq_id);
   3278 
   3279 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3280 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3281 		    device_xname(sc->sc_dev));
   3282 	} else {
   3283 #ifdef WM_DEBUG
   3284 		int i, j;
   3285 		struct wm_txsoft *txs;
   3286 #endif
   3287 		log(LOG_ERR,
   3288 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3289 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3290 		    txq->txq_next);
   3291 		if_statinc(ifp, if_oerrors);
   3292 #ifdef WM_DEBUG
   3293 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3294 		    i = WM_NEXTTXS(txq, i)) {
   3295 			txs = &txq->txq_soft[i];
   3296 			printf("txs %d tx %d -> %d\n",
   3297 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3298 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3299 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3300 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3301 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3302 					printf("\t %#08x%08x\n",
   3303 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3304 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3305 				} else {
   3306 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3307 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3308 					    txq->txq_descs[j].wtx_addr.wa_low);
   3309 					printf("\t %#04x%02x%02x%08x\n",
   3310 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3311 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3312 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3313 					    txq->txq_descs[j].wtx_cmdlen);
   3314 				}
   3315 				if (j == txs->txs_lastdesc)
   3316 					break;
   3317 			}
   3318 		}
   3319 #endif
   3320 	}
   3321 }
   3322 
   3323 /*
   3324  * wm_tick:
   3325  *
   3326  *	One second timer, used to check link status, sweep up
   3327  *	completed transmit jobs, etc.
   3328  */
   3329 static void
   3330 wm_tick(void *arg)
   3331 {
   3332 	struct wm_softc *sc = arg;
   3333 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3334 #ifndef WM_MPSAFE
   3335 	int s = splnet();
   3336 #endif
   3337 
   3338 	WM_CORE_LOCK(sc);
   3339 
   3340 	if (sc->sc_core_stopping) {
   3341 		WM_CORE_UNLOCK(sc);
   3342 #ifndef WM_MPSAFE
   3343 		splx(s);
   3344 #endif
   3345 		return;
   3346 	}
   3347 
   3348 	if (sc->sc_type >= WM_T_82542_2_1) {
   3349 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
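         	/*
         	 * The statistics registers read below are clear-on-read,
         	 * so each one-second snapshot is added to the running
         	 * event counters.
         	 */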
   3350 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3351 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3352 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3353 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3354 	}
   3355 
   3356 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3357 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3358 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3359 	    + CSR_READ(sc, WMREG_CRCERRS)
   3360 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3361 	    + CSR_READ(sc, WMREG_SYMERRC)
   3362 	    + CSR_READ(sc, WMREG_RXERRC)
   3363 	    + CSR_READ(sc, WMREG_SEC)
   3364 	    + CSR_READ(sc, WMREG_CEXTERR)
   3365 	    + CSR_READ(sc, WMREG_RLEC));
   3366 	/*
    3367 	 * WMREG_RNBC is incremented when there are no available buffers
    3368 	 * in host memory. It does not count dropped packets, because the
    3369 	 * Ethernet controller can still receive packets in that case if
    3370 	 * there is space in the PHY's FIFO.
    3371 	 *
    3372 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
    3373 	 * instead of if_iqdrops.
   3374 	 */
   3375 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3376 	IF_STAT_PUTREF(ifp);
   3377 
   3378 	if (sc->sc_flags & WM_F_HAS_MII)
   3379 		mii_tick(&sc->sc_mii);
   3380 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3381 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3382 		wm_serdes_tick(sc);
   3383 	else
   3384 		wm_tbi_tick(sc);
   3385 
   3386 	WM_CORE_UNLOCK(sc);
   3387 
   3388 	wm_watchdog(ifp);
   3389 
   3390 	callout_schedule(&sc->sc_tick_ch, hz);
   3391 }
   3392 
   3393 static int
   3394 wm_ifflags_cb(struct ethercom *ec)
   3395 {
   3396 	struct ifnet *ifp = &ec->ec_if;
   3397 	struct wm_softc *sc = ifp->if_softc;
   3398 	u_short iffchange;
   3399 	int ecchange;
   3400 	bool needreset = false;
   3401 	int rc = 0;
   3402 
   3403 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3404 		device_xname(sc->sc_dev), __func__));
   3405 
   3406 	WM_CORE_LOCK(sc);
   3407 
   3408 	/*
   3409 	 * Check for if_flags.
   3410 	 * Main usage is to prevent linkdown when opening bpf.
   3411 	 */
   3412 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3413 	sc->sc_if_flags = ifp->if_flags;
   3414 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3415 		needreset = true;
   3416 		goto ec;
   3417 	}
   3418 
   3419 	/* iff related updates */
   3420 	if ((iffchange & IFF_PROMISC) != 0)
   3421 		wm_set_filter(sc);
   3422 
   3423 	wm_set_vlan(sc);
   3424 
   3425 ec:
   3426 	/* Check for ec_capenable. */
   3427 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3428 	sc->sc_ec_capenable = ec->ec_capenable;
   3429 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3430 		needreset = true;
   3431 		goto out;
   3432 	}
   3433 
   3434 	/* ec related updates */
   3435 	wm_set_eee(sc);
   3436 
   3437 out:
   3438 	if (needreset)
   3439 		rc = ENETRESET;
   3440 	WM_CORE_UNLOCK(sc);
   3441 
   3442 	return rc;
   3443 }
   3444 
   3445 /*
   3446  * wm_ioctl:		[ifnet interface function]
   3447  *
   3448  *	Handle control requests from the operator.
   3449  */
   3450 static int
   3451 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3452 {
   3453 	struct wm_softc *sc = ifp->if_softc;
   3454 	struct ifreq *ifr = (struct ifreq *)data;
   3455 	struct ifaddr *ifa = (struct ifaddr *)data;
   3456 	struct sockaddr_dl *sdl;
   3457 	int s, error;
   3458 
   3459 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3460 		device_xname(sc->sc_dev), __func__));
   3461 
   3462 #ifndef WM_MPSAFE
   3463 	s = splnet();
   3464 #endif
   3465 	switch (cmd) {
   3466 	case SIOCSIFMEDIA:
   3467 		WM_CORE_LOCK(sc);
   3468 		/* Flow control requires full-duplex mode. */
   3469 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3470 		    (ifr->ifr_media & IFM_FDX) == 0)
   3471 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3472 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3473 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3474 				/* We can do both TXPAUSE and RXPAUSE. */
   3475 				ifr->ifr_media |=
   3476 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3477 			}
   3478 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3479 		}
   3480 		WM_CORE_UNLOCK(sc);
   3481 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3482 		break;
   3483 	case SIOCINITIFADDR:
   3484 		WM_CORE_LOCK(sc);
   3485 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3486 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3487 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3488 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3489 			/* Unicast address is the first multicast entry */
   3490 			wm_set_filter(sc);
   3491 			error = 0;
   3492 			WM_CORE_UNLOCK(sc);
   3493 			break;
   3494 		}
   3495 		WM_CORE_UNLOCK(sc);
   3496 		/*FALLTHROUGH*/
   3497 	default:
   3498 #ifdef WM_MPSAFE
   3499 		s = splnet();
   3500 #endif
   3501 		/* It may call wm_start, so unlock here */
   3502 		error = ether_ioctl(ifp, cmd, data);
   3503 #ifdef WM_MPSAFE
   3504 		splx(s);
   3505 #endif
   3506 		if (error != ENETRESET)
   3507 			break;
   3508 
   3509 		error = 0;
   3510 
   3511 		if (cmd == SIOCSIFCAP)
   3512 			error = (*ifp->if_init)(ifp);
   3513 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3514 			;
   3515 		else if (ifp->if_flags & IFF_RUNNING) {
   3516 			/*
   3517 			 * Multicast list has changed; set the hardware filter
   3518 			 * accordingly.
   3519 			 */
   3520 			WM_CORE_LOCK(sc);
   3521 			wm_set_filter(sc);
   3522 			WM_CORE_UNLOCK(sc);
   3523 		}
   3524 		break;
   3525 	}
   3526 
   3527 #ifndef WM_MPSAFE
   3528 	splx(s);
   3529 #endif
   3530 	return error;
   3531 }
   3532 
   3533 /* MAC address related */
   3534 
   3535 /*
    3536  * Get the offset of the MAC address and return it.
    3537  * If an error occurs, use offset 0.
   3538  */
   3539 static uint16_t
   3540 wm_check_alt_mac_addr(struct wm_softc *sc)
   3541 {
   3542 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3543 	uint16_t offset = NVM_OFF_MACADDR;
   3544 
   3545 	/* Try to read alternative MAC address pointer */
   3546 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3547 		return 0;
   3548 
    3549 	/* Check whether the pointer is valid. */
   3550 	if ((offset == 0x0000) || (offset == 0xffff))
   3551 		return 0;
   3552 
   3553 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3554 	/*
    3555 	 * Check whether the alternative MAC address is valid. Some
    3556 	 * cards have a non-0xffff pointer but don't actually use an
    3557 	 * alternative MAC address.
    3558 	 *
    3559 	 * The test: a valid unicast address has the multicast (group)
    3560 	 * bit of its first octet clear.
   3560 	 */
   3561 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3562 		if (((myea[0] & 0xff) & 0x01) == 0)
   3563 			return offset; /* Found */
   3564 
   3565 	/* Not found */
   3566 	return 0;
   3567 }
   3568 
   3569 static int
   3570 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3571 {
   3572 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3573 	uint16_t offset = NVM_OFF_MACADDR;
   3574 	int do_invert = 0;
   3575 
   3576 	switch (sc->sc_type) {
   3577 	case WM_T_82580:
   3578 	case WM_T_I350:
   3579 	case WM_T_I354:
   3580 		/* EEPROM Top Level Partitioning */
   3581 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3582 		break;
   3583 	case WM_T_82571:
   3584 	case WM_T_82575:
   3585 	case WM_T_82576:
   3586 	case WM_T_80003:
   3587 	case WM_T_I210:
   3588 	case WM_T_I211:
   3589 		offset = wm_check_alt_mac_addr(sc);
   3590 		if (offset == 0)
   3591 			if ((sc->sc_funcid & 0x01) == 1)
   3592 				do_invert = 1;
   3593 		break;
   3594 	default:
   3595 		if ((sc->sc_funcid & 0x01) == 1)
   3596 			do_invert = 1;
   3597 		break;
   3598 	}
   3599 
   3600 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3601 		goto bad;
   3602 
   3603 	enaddr[0] = myea[0] & 0xff;
   3604 	enaddr[1] = myea[0] >> 8;
   3605 	enaddr[2] = myea[1] & 0xff;
   3606 	enaddr[3] = myea[1] >> 8;
   3607 	enaddr[4] = myea[2] & 0xff;
   3608 	enaddr[5] = myea[2] >> 8;
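         	/*
         	 * For example (an illustrative value, not from real NVM):
         	 * myea[] = { 0x0100, 0x0302, 0x0504 } unpacks to the
         	 * address 00:01:02:03:04:05; NVM words are little-endian.
         	 */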
   3609 
   3610 	/*
   3611 	 * Toggle the LSB of the MAC address on the second port
   3612 	 * of some dual port cards.
   3613 	 */
   3614 	if (do_invert != 0)
   3615 		enaddr[5] ^= 1;
   3616 
   3617 	return 0;
   3618 
   3619  bad:
   3620 	return -1;
   3621 }
   3622 
   3623 /*
   3624  * wm_set_ral:
   3625  *
    3626  *	Set an entry in the receive address list.
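          *
          *	RAL takes the low four octets of the address (enaddr[0]
          *	in the least significant byte) and RAH the remaining two
          *	plus the Address Valid bit; a NULL enaddr clears the slot.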
   3627  */
   3628 static void
   3629 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3630 {
   3631 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3632 	uint32_t wlock_mac;
   3633 	int rv;
   3634 
   3635 	if (enaddr != NULL) {
   3636 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3637 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3638 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3639 		ral_hi |= RAL_AV;
   3640 	} else {
   3641 		ral_lo = 0;
   3642 		ral_hi = 0;
   3643 	}
   3644 
   3645 	switch (sc->sc_type) {
   3646 	case WM_T_82542_2_0:
   3647 	case WM_T_82542_2_1:
   3648 	case WM_T_82543:
   3649 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3650 		CSR_WRITE_FLUSH(sc);
   3651 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3652 		CSR_WRITE_FLUSH(sc);
   3653 		break;
   3654 	case WM_T_PCH2:
   3655 	case WM_T_PCH_LPT:
   3656 	case WM_T_PCH_SPT:
   3657 	case WM_T_PCH_CNP:
   3658 		if (idx == 0) {
   3659 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3660 			CSR_WRITE_FLUSH(sc);
   3661 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3662 			CSR_WRITE_FLUSH(sc);
   3663 			return;
   3664 		}
   3665 		if (sc->sc_type != WM_T_PCH2) {
   3666 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3667 			    FWSM_WLOCK_MAC);
   3668 			addrl = WMREG_SHRAL(idx - 1);
   3669 			addrh = WMREG_SHRAH(idx - 1);
   3670 		} else {
   3671 			wlock_mac = 0;
   3672 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3673 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3674 		}
   3675 
   3676 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3677 			rv = wm_get_swflag_ich8lan(sc);
   3678 			if (rv != 0)
   3679 				return;
   3680 			CSR_WRITE(sc, addrl, ral_lo);
   3681 			CSR_WRITE_FLUSH(sc);
   3682 			CSR_WRITE(sc, addrh, ral_hi);
   3683 			CSR_WRITE_FLUSH(sc);
   3684 			wm_put_swflag_ich8lan(sc);
   3685 		}
   3686 
   3687 		break;
   3688 	default:
   3689 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3690 		CSR_WRITE_FLUSH(sc);
   3691 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3692 		CSR_WRITE_FLUSH(sc);
   3693 		break;
   3694 	}
   3695 }
   3696 
   3697 /*
   3698  * wm_mchash:
   3699  *
   3700  *	Compute the hash of the multicast address for the 4096-bit
   3701  *	multicast filter.
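          *
          *	A worked example, assuming mchash_type 0 on a non-ICH/PCH
          *	chip: for 00:01:02:03:04:05, hash = (0x04 >> 4) |
          *	(0x05 << 4) = 0x050, which selects bit 16 (0x050 & 0x1f)
          *	of MTA word 2 (0x050 >> 5).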
   3702  */
   3703 static uint32_t
   3704 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3705 {
   3706 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3707 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3708 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3709 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3710 	uint32_t hash;
   3711 
   3712 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3713 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3714 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3715 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3716 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3717 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3718 		return (hash & 0x3ff);
   3719 	}
   3720 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3721 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3722 
   3723 	return (hash & 0xfff);
   3724 }
   3725 
   3726 /*
    3727  * wm_rar_count:
    3728  *	Return the number of entries in the receive address list.
   3729  */
   3730 static int
   3731 wm_rar_count(struct wm_softc *sc)
   3732 {
   3733 	int size;
   3734 
   3735 	switch (sc->sc_type) {
   3736 	case WM_T_ICH8:
    3737 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3738 		break;
   3739 	case WM_T_ICH9:
   3740 	case WM_T_ICH10:
   3741 	case WM_T_PCH:
   3742 		size = WM_RAL_TABSIZE_ICH8;
   3743 		break;
   3744 	case WM_T_PCH2:
   3745 		size = WM_RAL_TABSIZE_PCH2;
   3746 		break;
   3747 	case WM_T_PCH_LPT:
   3748 	case WM_T_PCH_SPT:
   3749 	case WM_T_PCH_CNP:
   3750 		size = WM_RAL_TABSIZE_PCH_LPT;
   3751 		break;
   3752 	case WM_T_82575:
   3753 	case WM_T_I210:
   3754 	case WM_T_I211:
   3755 		size = WM_RAL_TABSIZE_82575;
   3756 		break;
   3757 	case WM_T_82576:
   3758 	case WM_T_82580:
   3759 		size = WM_RAL_TABSIZE_82576;
   3760 		break;
   3761 	case WM_T_I350:
   3762 	case WM_T_I354:
   3763 		size = WM_RAL_TABSIZE_I350;
   3764 		break;
   3765 	default:
   3766 		size = WM_RAL_TABSIZE;
   3767 	}
   3768 
   3769 	return size;
   3770 }
   3771 
   3772 /*
   3773  * wm_set_filter:
   3774  *
   3775  *	Set up the receive filter.
   3776  */
   3777 static void
   3778 wm_set_filter(struct wm_softc *sc)
   3779 {
   3780 	struct ethercom *ec = &sc->sc_ethercom;
   3781 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3782 	struct ether_multi *enm;
   3783 	struct ether_multistep step;
   3784 	bus_addr_t mta_reg;
   3785 	uint32_t hash, reg, bit;
   3786 	int i, size, ralmax;
   3787 
   3788 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3789 		device_xname(sc->sc_dev), __func__));
   3790 
   3791 	if (sc->sc_type >= WM_T_82544)
   3792 		mta_reg = WMREG_CORDOVA_MTA;
   3793 	else
   3794 		mta_reg = WMREG_MTA;
   3795 
   3796 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3797 
   3798 	if (ifp->if_flags & IFF_BROADCAST)
   3799 		sc->sc_rctl |= RCTL_BAM;
   3800 	if (ifp->if_flags & IFF_PROMISC) {
   3801 		sc->sc_rctl |= RCTL_UPE;
   3802 		ETHER_LOCK(ec);
   3803 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3804 		ETHER_UNLOCK(ec);
   3805 		goto allmulti;
   3806 	}
   3807 
   3808 	/*
   3809 	 * Set the station address in the first RAL slot, and
   3810 	 * clear the remaining slots.
   3811 	 */
   3812 	size = wm_rar_count(sc);
   3813 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3814 
   3815 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3816 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3817 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3818 		switch (i) {
   3819 		case 0:
   3820 			/* We can use all entries */
   3821 			ralmax = size;
   3822 			break;
   3823 		case 1:
   3824 			/* Only RAR[0] */
   3825 			ralmax = 1;
   3826 			break;
   3827 		default:
   3828 			/* Available SHRA + RAR[0] */
   3829 			ralmax = i + 1;
   3830 		}
   3831 	} else
   3832 		ralmax = size;
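         	/*
         	 * Clear the remaining slots. Slots at or beyond ralmax are
         	 * locked by firmware (see the FWSM_WLOCK_MAC check above)
         	 * and are left untouched.
         	 */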
   3833 	for (i = 1; i < size; i++) {
   3834 		if (i < ralmax)
   3835 			wm_set_ral(sc, NULL, i);
   3836 	}
   3837 
   3838 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3839 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3840 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3841 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3842 		size = WM_ICH8_MC_TABSIZE;
   3843 	else
   3844 		size = WM_MC_TABSIZE;
   3845 	/* Clear out the multicast table. */
   3846 	for (i = 0; i < size; i++) {
   3847 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3848 		CSR_WRITE_FLUSH(sc);
   3849 	}
   3850 
   3851 	ETHER_LOCK(ec);
   3852 	ETHER_FIRST_MULTI(step, ec, enm);
   3853 	while (enm != NULL) {
   3854 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3855 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3856 			ETHER_UNLOCK(ec);
   3857 			/*
   3858 			 * We must listen to a range of multicast addresses.
   3859 			 * For now, just accept all multicasts, rather than
   3860 			 * trying to set only those filter bits needed to match
   3861 			 * the range.  (At this time, the only use of address
   3862 			 * ranges is for IP multicast routing, for which the
   3863 			 * range is big enough to require all bits set.)
   3864 			 */
   3865 			goto allmulti;
   3866 		}
   3867 
   3868 		hash = wm_mchash(sc, enm->enm_addrlo);
   3869 
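         		/*
         		 * The upper bits of the hash select a 32-bit MTA word
         		 * and the low five bits select the bit within that word.
         		 */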
   3870 		reg = (hash >> 5);
   3871 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3872 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3873 		    || (sc->sc_type == WM_T_PCH2)
   3874 		    || (sc->sc_type == WM_T_PCH_LPT)
   3875 		    || (sc->sc_type == WM_T_PCH_SPT)
   3876 		    || (sc->sc_type == WM_T_PCH_CNP))
   3877 			reg &= 0x1f;
   3878 		else
   3879 			reg &= 0x7f;
   3880 		bit = hash & 0x1f;
   3881 
   3882 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3883 		hash |= 1U << bit;
   3884 
   3885 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3886 			/*
    3887 			 * 82544 Errata 9: Certain registers cannot be written
   3888 			 * with particular alignments in PCI-X bus operation
   3889 			 * (FCAH, MTA and VFTA).
   3890 			 */
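         			/*
         			 * Workaround sketch: when writing an odd-indexed
         			 * MTA word, re-write the preceding even-indexed
         			 * word afterwards to satisfy the alignment
         			 * restriction.
         			 */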
   3891 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3892 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3893 			CSR_WRITE_FLUSH(sc);
   3894 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3895 			CSR_WRITE_FLUSH(sc);
   3896 		} else {
   3897 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3898 			CSR_WRITE_FLUSH(sc);
   3899 		}
   3900 
   3901 		ETHER_NEXT_MULTI(step, enm);
   3902 	}
   3903 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3904 	ETHER_UNLOCK(ec);
   3905 
   3906 	goto setit;
   3907 
   3908  allmulti:
   3909 	sc->sc_rctl |= RCTL_MPE;
   3910 
   3911  setit:
   3912 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3913 }
   3914 
   3915 /* Reset and init related */
   3916 
   3917 static void
   3918 wm_set_vlan(struct wm_softc *sc)
   3919 {
   3920 
   3921 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3922 		device_xname(sc->sc_dev), __func__));
   3923 
   3924 	/* Deal with VLAN enables. */
   3925 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3926 		sc->sc_ctrl |= CTRL_VME;
   3927 	else
   3928 		sc->sc_ctrl &= ~CTRL_VME;
   3929 
   3930 	/* Write the control registers. */
   3931 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3932 }
   3933 
   3934 static void
   3935 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3936 {
   3937 	uint32_t gcr;
   3938 	pcireg_t ctrl2;
   3939 
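         	/*
         	 * Flow sketch: act only if the completion timeout is still
         	 * at its default of 0. Pre-2.0 capability parts get a 10ms
         	 * timeout via GCR; 2.0-capable parts are programmed through
         	 * the standard PCIe Device Control 2 register (16ms) instead.
         	 */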
   3940 	gcr = CSR_READ(sc, WMREG_GCR);
   3941 
    3942 	/* Only take action if the timeout value is at its default of 0 */
   3943 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3944 		goto out;
   3945 
   3946 	if ((gcr & GCR_CAP_VER2) == 0) {
   3947 		gcr |= GCR_CMPL_TMOUT_10MS;
   3948 		goto out;
   3949 	}
   3950 
   3951 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3952 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3953 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3954 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3955 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3956 
   3957 out:
   3958 	/* Disable completion timeout resend */
   3959 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3960 
   3961 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3962 }
   3963 
   3964 void
   3965 wm_get_auto_rd_done(struct wm_softc *sc)
   3966 {
   3967 	int i;
   3968 
    3969 	/* Wait for the EEPROM to reload */
   3970 	switch (sc->sc_type) {
   3971 	case WM_T_82571:
   3972 	case WM_T_82572:
   3973 	case WM_T_82573:
   3974 	case WM_T_82574:
   3975 	case WM_T_82583:
   3976 	case WM_T_82575:
   3977 	case WM_T_82576:
   3978 	case WM_T_82580:
   3979 	case WM_T_I350:
   3980 	case WM_T_I354:
   3981 	case WM_T_I210:
   3982 	case WM_T_I211:
   3983 	case WM_T_80003:
   3984 	case WM_T_ICH8:
   3985 	case WM_T_ICH9:
   3986 		for (i = 0; i < 10; i++) {
   3987 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3988 				break;
   3989 			delay(1000);
   3990 		}
   3991 		if (i == 10) {
   3992 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3993 			    "complete\n", device_xname(sc->sc_dev));
   3994 		}
   3995 		break;
   3996 	default:
   3997 		break;
   3998 	}
   3999 }
   4000 
   4001 void
   4002 wm_lan_init_done(struct wm_softc *sc)
   4003 {
   4004 	uint32_t reg = 0;
   4005 	int i;
   4006 
   4007 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4008 		device_xname(sc->sc_dev), __func__));
   4009 
   4010 	/* Wait for eeprom to reload */
   4011 	switch (sc->sc_type) {
   4012 	case WM_T_ICH10:
   4013 	case WM_T_PCH:
   4014 	case WM_T_PCH2:
   4015 	case WM_T_PCH_LPT:
   4016 	case WM_T_PCH_SPT:
   4017 	case WM_T_PCH_CNP:
   4018 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4019 			reg = CSR_READ(sc, WMREG_STATUS);
   4020 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4021 				break;
   4022 			delay(100);
   4023 		}
   4024 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4025 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4026 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4027 		}
   4028 		break;
   4029 	default:
   4030 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4031 		    __func__);
   4032 		break;
   4033 	}
   4034 
   4035 	reg &= ~STATUS_LAN_INIT_DONE;
   4036 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4037 }
   4038 
   4039 void
   4040 wm_get_cfg_done(struct wm_softc *sc)
   4041 {
   4042 	int mask;
   4043 	uint32_t reg;
   4044 	int i;
   4045 
   4046 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4047 		device_xname(sc->sc_dev), __func__));
   4048 
   4049 	/* Wait for eeprom to reload */
   4050 	switch (sc->sc_type) {
   4051 	case WM_T_82542_2_0:
   4052 	case WM_T_82542_2_1:
   4053 		/* null */
   4054 		break;
   4055 	case WM_T_82543:
   4056 	case WM_T_82544:
   4057 	case WM_T_82540:
   4058 	case WM_T_82545:
   4059 	case WM_T_82545_3:
   4060 	case WM_T_82546:
   4061 	case WM_T_82546_3:
   4062 	case WM_T_82541:
   4063 	case WM_T_82541_2:
   4064 	case WM_T_82547:
   4065 	case WM_T_82547_2:
   4066 	case WM_T_82573:
   4067 	case WM_T_82574:
   4068 	case WM_T_82583:
   4069 		/* generic */
   4070 		delay(10*1000);
   4071 		break;
   4072 	case WM_T_80003:
   4073 	case WM_T_82571:
   4074 	case WM_T_82572:
   4075 	case WM_T_82575:
   4076 	case WM_T_82576:
   4077 	case WM_T_82580:
   4078 	case WM_T_I350:
   4079 	case WM_T_I354:
   4080 	case WM_T_I210:
   4081 	case WM_T_I211:
   4082 		if (sc->sc_type == WM_T_82571) {
   4083 			/* Only 82571 shares port 0 */
   4084 			mask = EEMNGCTL_CFGDONE_0;
   4085 		} else
   4086 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4087 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4088 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4089 				break;
   4090 			delay(1000);
   4091 		}
   4092 		if (i >= WM_PHY_CFG_TIMEOUT)
   4093 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4094 				device_xname(sc->sc_dev), __func__));
   4095 		break;
   4096 	case WM_T_ICH8:
   4097 	case WM_T_ICH9:
   4098 	case WM_T_ICH10:
   4099 	case WM_T_PCH:
   4100 	case WM_T_PCH2:
   4101 	case WM_T_PCH_LPT:
   4102 	case WM_T_PCH_SPT:
   4103 	case WM_T_PCH_CNP:
   4104 		delay(10*1000);
   4105 		if (sc->sc_type >= WM_T_ICH10)
   4106 			wm_lan_init_done(sc);
   4107 		else
   4108 			wm_get_auto_rd_done(sc);
   4109 
   4110 		/* Clear PHY Reset Asserted bit */
   4111 		reg = CSR_READ(sc, WMREG_STATUS);
   4112 		if ((reg & STATUS_PHYRA) != 0)
   4113 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4114 		break;
   4115 	default:
   4116 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4117 		    __func__);
   4118 		break;
   4119 	}
   4120 }
   4121 
   4122 int
   4123 wm_phy_post_reset(struct wm_softc *sc)
   4124 {
   4125 	device_t dev = sc->sc_dev;
   4126 	uint16_t reg;
   4127 	int rv = 0;
   4128 
   4129 	/* This function is only for ICH8 and newer. */
   4130 	if (sc->sc_type < WM_T_ICH8)
   4131 		return 0;
   4132 
   4133 	if (wm_phy_resetisblocked(sc)) {
   4134 		/* XXX */
   4135 		device_printf(dev, "PHY is blocked\n");
   4136 		return -1;
   4137 	}
   4138 
   4139 	/* Allow time for h/w to get to quiescent state after reset */
   4140 	delay(10*1000);
   4141 
   4142 	/* Perform any necessary post-reset workarounds */
   4143 	if (sc->sc_type == WM_T_PCH)
   4144 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4145 	else if (sc->sc_type == WM_T_PCH2)
   4146 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4147 	if (rv != 0)
   4148 		return rv;
   4149 
   4150 	/* Clear the host wakeup bit after lcd reset */
   4151 	if (sc->sc_type >= WM_T_PCH) {
   4152 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4153 		reg &= ~BM_WUC_HOST_WU_BIT;
   4154 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4155 	}
   4156 
   4157 	/* Configure the LCD with the extended configuration region in NVM */
   4158 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4159 		return rv;
   4160 
   4161 	/* Configure the LCD with the OEM bits in NVM */
   4162 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4163 
   4164 	if (sc->sc_type == WM_T_PCH2) {
   4165 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4166 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4167 			delay(10 * 1000);
   4168 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4169 		}
   4170 		/* Set EEE LPI Update Timer to 200usec */
   4171 		rv = sc->phy.acquire(sc);
   4172 		if (rv)
   4173 			return rv;
   4174 		rv = wm_write_emi_reg_locked(dev,
   4175 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4176 		sc->phy.release(sc);
   4177 	}
   4178 
   4179 	return rv;
   4180 }
   4181 
   4182 /* Only for PCH and newer */
   4183 static int
   4184 wm_write_smbus_addr(struct wm_softc *sc)
   4185 {
   4186 	uint32_t strap, freq;
   4187 	uint16_t phy_data;
   4188 	int rv;
   4189 
   4190 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4191 		device_xname(sc->sc_dev), __func__));
   4192 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4193 
   4194 	strap = CSR_READ(sc, WMREG_STRAP);
   4195 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4196 
   4197 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4198 	if (rv != 0)
   4199 		return -1;
   4200 
   4201 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4202 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4203 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4204 
   4205 	if (sc->sc_phytype == WMPHY_I217) {
   4206 		/* Restore SMBus frequency */
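         	/*
         	 * The strap word encodes the SMBus frequency as a small
         	 * integer: 0 means unsupported; otherwise bits 0 and 1 of
         	 * (freq - 1) map to the LOW and HIGH frequency bits of
         	 * HV_SMB_ADDR.
         	 */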
    4207 		if (freq--) {
   4208 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4209 			    | HV_SMB_ADDR_FREQ_HIGH);
   4210 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4211 			    HV_SMB_ADDR_FREQ_LOW);
   4212 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4213 			    HV_SMB_ADDR_FREQ_HIGH);
   4214 		} else
   4215 			DPRINTF(WM_DEBUG_INIT,
   4216 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4217 				device_xname(sc->sc_dev), __func__));
   4218 	}
   4219 
   4220 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4221 	    phy_data);
   4222 }
   4223 
   4224 static int
   4225 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4226 {
   4227 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4228 	uint16_t phy_page = 0;
   4229 	int rv = 0;
   4230 
   4231 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4232 		device_xname(sc->sc_dev), __func__));
   4233 
   4234 	switch (sc->sc_type) {
   4235 	case WM_T_ICH8:
   4236 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4237 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4238 			return 0;
   4239 
   4240 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4241 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4242 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4243 			break;
   4244 		}
   4245 		/* FALLTHROUGH */
   4246 	case WM_T_PCH:
   4247 	case WM_T_PCH2:
   4248 	case WM_T_PCH_LPT:
   4249 	case WM_T_PCH_SPT:
   4250 	case WM_T_PCH_CNP:
   4251 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4252 		break;
   4253 	default:
   4254 		return 0;
   4255 	}
   4256 
   4257 	if ((rv = sc->phy.acquire(sc)) != 0)
   4258 		return rv;
   4259 
   4260 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4261 	if ((reg & sw_cfg_mask) == 0)
   4262 		goto release;
   4263 
   4264 	/*
   4265 	 * Make sure HW does not configure LCD from PHY extended configuration
   4266 	 * before SW configuration
   4267 	 */
   4268 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4269 	if ((sc->sc_type < WM_T_PCH2)
   4270 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4271 		goto release;
   4272 
   4273 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4274 		device_xname(sc->sc_dev), __func__));
   4275 	/* word_addr is in DWORD */
   4276 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4277 
   4278 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4279 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4280 	if (cnf_size == 0)
   4281 		goto release;
   4282 
   4283 	if (((sc->sc_type == WM_T_PCH)
   4284 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4285 	    || (sc->sc_type > WM_T_PCH)) {
   4286 		/*
   4287 		 * HW configures the SMBus address and LEDs when the OEM and
   4288 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4289 		 * are cleared, SW will configure them instead.
   4290 		 */
   4291 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4292 			device_xname(sc->sc_dev), __func__));
   4293 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4294 			goto release;
   4295 
   4296 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4297 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4298 		    (uint16_t)reg);
   4299 		if (rv != 0)
   4300 			goto release;
   4301 	}
   4302 
   4303 	/* Configure LCD from extended configuration region. */
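         	/*
         	 * Each entry in the region is a (data, address) word pair;
         	 * a write to the page select register updates phy_page so
         	 * that later register addresses resolve within that page.
         	 */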
   4304 	for (i = 0; i < cnf_size; i++) {
   4305 		uint16_t reg_data, reg_addr;
   4306 
   4307 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4308 			goto release;
   4309 
    4310 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4311 			goto release;
   4312 
   4313 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4314 			phy_page = reg_data;
   4315 
   4316 		reg_addr &= IGPHY_MAXREGADDR;
   4317 		reg_addr |= phy_page;
   4318 
   4319 		KASSERT(sc->phy.writereg_locked != NULL);
   4320 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4321 		    reg_data);
   4322 	}
   4323 
   4324 release:
   4325 	sc->phy.release(sc);
   4326 	return rv;
   4327 }
   4328 
   4329 /*
   4330  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4331  *  @sc:       pointer to the HW structure
   4332  *  @d0_state: boolean if entering d0 or d3 device state
   4333  *
   4334  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4335  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4336  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4337  */
   4338 int
   4339 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4340 {
   4341 	uint32_t mac_reg;
   4342 	uint16_t oem_reg;
   4343 	int rv;
   4344 
   4345 	if (sc->sc_type < WM_T_PCH)
   4346 		return 0;
   4347 
   4348 	rv = sc->phy.acquire(sc);
   4349 	if (rv != 0)
   4350 		return rv;
   4351 
   4352 	if (sc->sc_type == WM_T_PCH) {
   4353 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4354 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4355 			goto release;
   4356 	}
   4357 
   4358 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4359 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4360 		goto release;
   4361 
   4362 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4363 
   4364 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4365 	if (rv != 0)
   4366 		goto release;
   4367 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4368 
   4369 	if (d0_state) {
   4370 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4371 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4372 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4373 			oem_reg |= HV_OEM_BITS_LPLU;
   4374 	} else {
   4375 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4376 		    != 0)
   4377 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4378 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4379 		    != 0)
   4380 			oem_reg |= HV_OEM_BITS_LPLU;
   4381 	}
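         	/*
         	 * Roughly: entering D0 honors only the D0A variants of the
         	 * PHY_CTRL bits, while entering D3 also takes the non-D0A
         	 * variants into account when setting A1KDIS and LPLU.
         	 */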
   4382 
   4383 	/* Set Restart auto-neg to activate the bits */
   4384 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4385 	    && (wm_phy_resetisblocked(sc) == false))
   4386 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4387 
   4388 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4389 
   4390 release:
   4391 	sc->phy.release(sc);
   4392 
   4393 	return rv;
   4394 }
   4395 
   4396 /* Init hardware bits */
   4397 void
   4398 wm_initialize_hardware_bits(struct wm_softc *sc)
   4399 {
   4400 	uint32_t tarc0, tarc1, reg;
   4401 
   4402 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4403 		device_xname(sc->sc_dev), __func__));
   4404 
   4405 	/* For 82571 variant, 80003 and ICHs */
   4406 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4407 	    || (sc->sc_type >= WM_T_80003)) {
   4408 
   4409 		/* Transmit Descriptor Control 0 */
   4410 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4411 		reg |= TXDCTL_COUNT_DESC;
   4412 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4413 
   4414 		/* Transmit Descriptor Control 1 */
   4415 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4416 		reg |= TXDCTL_COUNT_DESC;
   4417 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4418 
   4419 		/* TARC0 */
   4420 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4421 		switch (sc->sc_type) {
   4422 		case WM_T_82571:
   4423 		case WM_T_82572:
   4424 		case WM_T_82573:
   4425 		case WM_T_82574:
   4426 		case WM_T_82583:
   4427 		case WM_T_80003:
   4428 			/* Clear bits 30..27 */
   4429 			tarc0 &= ~__BITS(30, 27);
   4430 			break;
   4431 		default:
   4432 			break;
   4433 		}
   4434 
   4435 		switch (sc->sc_type) {
   4436 		case WM_T_82571:
   4437 		case WM_T_82572:
   4438 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4439 
   4440 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4441 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4442 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4443 			/* 8257[12] Errata No.7 */
    4444 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4445 
   4446 			/* TARC1 bit 28 */
   4447 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4448 				tarc1 &= ~__BIT(28);
   4449 			else
   4450 				tarc1 |= __BIT(28);
   4451 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4452 
   4453 			/*
   4454 			 * 8257[12] Errata No.13
    4455 			 * Disable Dynamic Clock Gating.
   4456 			 */
   4457 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4458 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4459 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4460 			break;
   4461 		case WM_T_82573:
   4462 		case WM_T_82574:
   4463 		case WM_T_82583:
   4464 			if ((sc->sc_type == WM_T_82574)
   4465 			    || (sc->sc_type == WM_T_82583))
   4466 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4467 
   4468 			/* Extended Device Control */
   4469 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4470 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4471 			reg |= __BIT(22);	/* Set bit 22 */
   4472 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4473 
   4474 			/* Device Control */
   4475 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4476 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4477 
   4478 			/* PCIe Control Register */
   4479 			/*
   4480 			 * 82573 Errata (unknown).
   4481 			 *
   4482 			 * 82574 Errata 25 and 82583 Errata 12
   4483 			 * "Dropped Rx Packets":
    4484 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4485 			 */
   4486 			reg = CSR_READ(sc, WMREG_GCR);
   4487 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4488 			CSR_WRITE(sc, WMREG_GCR, reg);
   4489 
   4490 			if ((sc->sc_type == WM_T_82574)
   4491 			    || (sc->sc_type == WM_T_82583)) {
   4492 				/*
   4493 				 * Document says this bit must be set for
   4494 				 * proper operation.
   4495 				 */
   4496 				reg = CSR_READ(sc, WMREG_GCR);
   4497 				reg |= __BIT(22);
   4498 				CSR_WRITE(sc, WMREG_GCR, reg);
   4499 
   4500 				/*
    4501 				 * Apply a workaround for the hardware errata
    4502 				 * documented in the errata docs: it fixes an
    4503 				 * issue where error-prone or unreliable PCIe
    4504 				 * completions occur, particularly with ASPM
    4505 				 * enabled. Without the fix, the issue can cause
    4506 				 * Tx timeouts.
   4507 				 */
   4508 				reg = CSR_READ(sc, WMREG_GCR2);
   4509 				reg |= __BIT(0);
   4510 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4511 			}
   4512 			break;
   4513 		case WM_T_80003:
   4514 			/* TARC0 */
   4515 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4516 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4517 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4518 
   4519 			/* TARC1 bit 28 */
   4520 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4521 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4522 				tarc1 &= ~__BIT(28);
   4523 			else
   4524 				tarc1 |= __BIT(28);
   4525 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4526 			break;
   4527 		case WM_T_ICH8:
   4528 		case WM_T_ICH9:
   4529 		case WM_T_ICH10:
   4530 		case WM_T_PCH:
   4531 		case WM_T_PCH2:
   4532 		case WM_T_PCH_LPT:
   4533 		case WM_T_PCH_SPT:
   4534 		case WM_T_PCH_CNP:
   4535 			/* TARC0 */
   4536 			if (sc->sc_type == WM_T_ICH8) {
   4537 				/* Set TARC0 bits 29 and 28 */
   4538 				tarc0 |= __BITS(29, 28);
   4539 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4540 				tarc0 |= __BIT(29);
   4541 				/*
    4542 				 * Drop bit 28. From Linux.
   4543 				 * See I218/I219 spec update
   4544 				 * "5. Buffer Overrun While the I219 is
   4545 				 * Processing DMA Transactions"
   4546 				 */
   4547 				tarc0 &= ~__BIT(28);
   4548 			}
   4549 			/* Set TARC0 bits 23,24,26,27 */
   4550 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4551 
   4552 			/* CTRL_EXT */
   4553 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4554 			reg |= __BIT(22);	/* Set bit 22 */
   4555 			/*
   4556 			 * Enable PHY low-power state when MAC is at D3
   4557 			 * w/o WoL
   4558 			 */
   4559 			if (sc->sc_type >= WM_T_PCH)
   4560 				reg |= CTRL_EXT_PHYPDEN;
   4561 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4562 
   4563 			/* TARC1 */
   4564 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4565 			/* bit 28 */
   4566 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4567 				tarc1 &= ~__BIT(28);
   4568 			else
   4569 				tarc1 |= __BIT(28);
   4570 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4571 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4572 
   4573 			/* Device Status */
   4574 			if (sc->sc_type == WM_T_ICH8) {
   4575 				reg = CSR_READ(sc, WMREG_STATUS);
   4576 				reg &= ~__BIT(31);
   4577 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4578 
   4579 			}
   4580 
   4581 			/* IOSFPC */
   4582 			if (sc->sc_type == WM_T_PCH_SPT) {
   4583 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4584 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4585 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4586 			}
   4587 			/*
    4588 			 * To work around a descriptor data corruption issue
    4589 			 * during NFS v2 UDP traffic, just disable the NFS
    4590 			 * filtering capability.
   4591 			 */
   4592 			reg = CSR_READ(sc, WMREG_RFCTL);
   4593 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4594 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4595 			break;
   4596 		default:
   4597 			break;
   4598 		}
   4599 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4600 
   4601 		switch (sc->sc_type) {
   4602 		/*
   4603 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4604 		 * Avoid RSS Hash Value bug.
   4605 		 */
   4606 		case WM_T_82571:
   4607 		case WM_T_82572:
   4608 		case WM_T_82573:
   4609 		case WM_T_80003:
   4610 		case WM_T_ICH8:
   4611 			reg = CSR_READ(sc, WMREG_RFCTL);
    4612 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4613 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4614 			break;
   4615 		case WM_T_82574:
    4616 			/* Use extended Rx descriptor. */
   4617 			reg = CSR_READ(sc, WMREG_RFCTL);
   4618 			reg |= WMREG_RFCTL_EXSTEN;
   4619 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4620 			break;
   4621 		default:
   4622 			break;
   4623 		}
   4624 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4625 		/*
   4626 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4627 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4628 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4629 		 * Correctly by the Device"
   4630 		 *
   4631 		 * I354(C2000) Errata AVR53:
   4632 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4633 		 * Hang"
   4634 		 */
   4635 		reg = CSR_READ(sc, WMREG_RFCTL);
   4636 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4637 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4638 	}
   4639 }
   4640 
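         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate an RXPBS register value to a packet buffer size
          *	using the 82580 lookup table. Out-of-range values yield 0.
          */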
   4641 static uint32_t
   4642 wm_rxpbs_adjust_82580(uint32_t val)
   4643 {
   4644 	uint32_t rv = 0;
   4645 
   4646 	if (val < __arraycount(wm_82580_rxpbs_table))
   4647 		rv = wm_82580_rxpbs_table[val];
   4648 
   4649 	return rv;
   4650 }
   4651 
   4652 /*
   4653  * wm_reset_phy:
   4654  *
    4655  *	Generic PHY reset function.
   4656  *	Same as e1000_phy_hw_reset_generic()
   4657  */
   4658 static int
   4659 wm_reset_phy(struct wm_softc *sc)
   4660 {
   4661 	uint32_t reg;
   4662 
   4663 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4664 		device_xname(sc->sc_dev), __func__));
   4665 	if (wm_phy_resetisblocked(sc))
   4666 		return -1;
   4667 
   4668 	sc->phy.acquire(sc);
   4669 
   4670 	reg = CSR_READ(sc, WMREG_CTRL);
   4671 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4672 	CSR_WRITE_FLUSH(sc);
   4673 
   4674 	delay(sc->phy.reset_delay_us);
   4675 
   4676 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4677 	CSR_WRITE_FLUSH(sc);
   4678 
   4679 	delay(150);
   4680 
   4681 	sc->phy.release(sc);
   4682 
   4683 	wm_get_cfg_done(sc);
   4684 	wm_phy_post_reset(sc);
   4685 
   4686 	return 0;
   4687 }
   4688 
   4689 /*
    4690  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4691  * so it is sufficient to check sc->sc_queue[0] only.
   4692  */
   4693 static void
   4694 wm_flush_desc_rings(struct wm_softc *sc)
   4695 {
   4696 	pcireg_t preg;
   4697 	uint32_t reg;
   4698 	struct wm_txqueue *txq;
   4699 	wiseman_txdesc_t *txd;
   4700 	int nexttx;
   4701 	uint32_t rctl;
   4702 
   4703 	/* First, disable MULR fix in FEXTNVM11 */
   4704 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4705 	reg |= FEXTNVM11_DIS_MULRFIX;
   4706 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4707 
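         	/*
         	 * A flush is needed only when the FLUSH_REQ bit is set in
         	 * the descriptor ring status and the TX ring is in use.
         	 */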
   4708 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4709 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4710 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4711 		return;
   4712 
   4713 	/* TX */
   4714 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4715 	    preg, reg);
   4716 	reg = CSR_READ(sc, WMREG_TCTL);
   4717 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4718 
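         	/*
         	 * Queue a single dummy 512-byte descriptor with IFCS set and
         	 * advance the tail pointer so that the hardware drains its
         	 * internal descriptor cache.
         	 */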
   4719 	txq = &sc->sc_queue[0].wmq_txq;
   4720 	nexttx = txq->txq_next;
   4721 	txd = &txq->txq_descs[nexttx];
   4722 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4723 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4724 	txd->wtx_fields.wtxu_status = 0;
   4725 	txd->wtx_fields.wtxu_options = 0;
   4726 	txd->wtx_fields.wtxu_vlan = 0;
   4727 
   4728 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4729 	    BUS_SPACE_BARRIER_WRITE);
   4730 
   4731 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4732 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4733 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4734 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4735 	delay(250);
   4736 
   4737 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4738 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4739 		return;
   4740 
   4741 	/* RX */
   4742 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4743 	rctl = CSR_READ(sc, WMREG_RCTL);
   4744 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4745 	CSR_WRITE_FLUSH(sc);
   4746 	delay(150);
   4747 
   4748 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4749 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4750 	reg &= 0xffffc000;
   4751 	/*
   4752 	 * Update thresholds: prefetch threshold to 31, host threshold
   4753 	 * to 1 and make sure the granularity is "descriptors" and not
   4754 	 * "cache lines"
   4755 	 */
   4756 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4757 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4758 
   4759 	/* Momentarily enable the RX ring for the changes to take effect */
   4760 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4761 	CSR_WRITE_FLUSH(sc);
   4762 	delay(150);
   4763 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4764 }
   4765 
   4766 /*
   4767  * wm_reset:
   4768  *
   4769  *	Reset the i82542 chip.
   4770  */
   4771 static void
   4772 wm_reset(struct wm_softc *sc)
   4773 {
   4774 	int phy_reset = 0;
   4775 	int i, error = 0;
   4776 	uint32_t reg;
   4777 	uint16_t kmreg;
   4778 	int rv;
   4779 
   4780 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4781 		device_xname(sc->sc_dev), __func__));
   4782 	KASSERT(sc->sc_type != 0);
   4783 
   4784 	/*
   4785 	 * Allocate on-chip memory according to the MTU size.
   4786 	 * The Packet Buffer Allocation register must be written
   4787 	 * before the chip is reset.
   4788 	 */
   4789 	switch (sc->sc_type) {
   4790 	case WM_T_82547:
   4791 	case WM_T_82547_2:
   4792 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4793 		    PBA_22K : PBA_30K;
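         		/*
         		 * The receiver gets sc_pba KB; the remainder of the
         		 * 40KB on-chip packet memory becomes the TX FIFO.
         		 */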
   4794 		for (i = 0; i < sc->sc_nqueues; i++) {
   4795 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4796 			txq->txq_fifo_head = 0;
   4797 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4798 			txq->txq_fifo_size =
   4799 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4800 			txq->txq_fifo_stall = 0;
   4801 		}
   4802 		break;
   4803 	case WM_T_82571:
   4804 	case WM_T_82572:
   4805 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4806 	case WM_T_80003:
   4807 		sc->sc_pba = PBA_32K;
   4808 		break;
   4809 	case WM_T_82573:
   4810 		sc->sc_pba = PBA_12K;
   4811 		break;
   4812 	case WM_T_82574:
   4813 	case WM_T_82583:
   4814 		sc->sc_pba = PBA_20K;
   4815 		break;
   4816 	case WM_T_82576:
   4817 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4818 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4819 		break;
   4820 	case WM_T_82580:
   4821 	case WM_T_I350:
   4822 	case WM_T_I354:
   4823 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4824 		break;
   4825 	case WM_T_I210:
   4826 	case WM_T_I211:
   4827 		sc->sc_pba = PBA_34K;
   4828 		break;
   4829 	case WM_T_ICH8:
   4830 		/* Workaround for a bit corruption issue in FIFO memory */
   4831 		sc->sc_pba = PBA_8K;
   4832 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4833 		break;
   4834 	case WM_T_ICH9:
   4835 	case WM_T_ICH10:
   4836 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4837 		    PBA_14K : PBA_10K;
   4838 		break;
   4839 	case WM_T_PCH:
   4840 	case WM_T_PCH2:	/* XXX 14K? */
   4841 	case WM_T_PCH_LPT:
   4842 	case WM_T_PCH_SPT:
   4843 	case WM_T_PCH_CNP:
   4844 		sc->sc_pba = PBA_26K;
   4845 		break;
   4846 	default:
   4847 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4848 		    PBA_40K : PBA_48K;
   4849 		break;
   4850 	}
   4851 	/*
    4852 	 * Only old or non-multiqueue devices have the PBA register.
   4853 	 * XXX Need special handling for 82575.
   4854 	 */
   4855 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4856 	    || (sc->sc_type == WM_T_82575))
   4857 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4858 
   4859 	/* Prevent the PCI-E bus from sticking */
   4860 	if (sc->sc_flags & WM_F_PCIE) {
   4861 		int timeout = 800;
   4862 
   4863 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4864 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4865 
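         		/* Wait up to 80ms (800 * 100us) for master requests to stop */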
   4866 		while (timeout--) {
   4867 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4868 			    == 0)
   4869 				break;
   4870 			delay(100);
   4871 		}
   4872 		if (timeout == 0)
   4873 			device_printf(sc->sc_dev,
   4874 			    "failed to disable busmastering\n");
   4875 	}
   4876 
   4877 	/* Set the completion timeout for interface */
   4878 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4879 	    || (sc->sc_type == WM_T_82580)
   4880 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4881 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4882 		wm_set_pcie_completion_timeout(sc);
   4883 
   4884 	/* Clear interrupt */
   4885 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4886 	if (wm_is_using_msix(sc)) {
   4887 		if (sc->sc_type != WM_T_82574) {
   4888 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4889 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4890 		} else
   4891 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4892 	}
   4893 
   4894 	/* Stop the transmit and receive processes. */
   4895 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4896 	sc->sc_rctl &= ~RCTL_EN;
   4897 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4898 	CSR_WRITE_FLUSH(sc);
   4899 
   4900 	/* XXX set_tbi_sbp_82543() */
   4901 
   4902 	delay(10*1000);
   4903 
   4904 	/* Must acquire the MDIO ownership before MAC reset */
   4905 	switch (sc->sc_type) {
   4906 	case WM_T_82573:
   4907 	case WM_T_82574:
   4908 	case WM_T_82583:
   4909 		error = wm_get_hw_semaphore_82573(sc);
   4910 		break;
   4911 	default:
   4912 		break;
   4913 	}
   4914 
   4915 	/*
   4916 	 * 82541 Errata 29? & 82547 Errata 28?
   4917 	 * See also the description about PHY_RST bit in CTRL register
   4918 	 * in 8254x_GBe_SDM.pdf.
   4919 	 */
   4920 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4921 		CSR_WRITE(sc, WMREG_CTRL,
   4922 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4923 		CSR_WRITE_FLUSH(sc);
   4924 		delay(5000);
   4925 	}
   4926 
   4927 	switch (sc->sc_type) {
   4928 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4929 	case WM_T_82541:
   4930 	case WM_T_82541_2:
   4931 	case WM_T_82547:
   4932 	case WM_T_82547_2:
   4933 		/*
   4934 		 * On some chipsets, a reset through a memory-mapped write
   4935 		 * cycle can cause the chip to reset before completing the
    4936 		 * write cycle. This causes a major headache that can be avoided
   4937 		 * by issuing the reset via indirect register writes through
   4938 		 * I/O space.
   4939 		 *
   4940 		 * So, if we successfully mapped the I/O BAR at attach time,
   4941 		 * use that. Otherwise, try our luck with a memory-mapped
   4942 		 * reset.
   4943 		 */
   4944 		if (sc->sc_flags & WM_F_IOH_VALID)
   4945 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4946 		else
   4947 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4948 		break;
   4949 	case WM_T_82545_3:
   4950 	case WM_T_82546_3:
   4951 		/* Use the shadow control register on these chips. */
   4952 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4953 		break;
   4954 	case WM_T_80003:
   4955 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4956 		sc->phy.acquire(sc);
   4957 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4958 		sc->phy.release(sc);
   4959 		break;
   4960 	case WM_T_ICH8:
   4961 	case WM_T_ICH9:
   4962 	case WM_T_ICH10:
   4963 	case WM_T_PCH:
   4964 	case WM_T_PCH2:
   4965 	case WM_T_PCH_LPT:
   4966 	case WM_T_PCH_SPT:
   4967 	case WM_T_PCH_CNP:
   4968 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4969 		if (wm_phy_resetisblocked(sc) == false) {
   4970 			/*
   4971 			 * Gate automatic PHY configuration by hardware on
   4972 			 * non-managed 82579
   4973 			 */
   4974 			if ((sc->sc_type == WM_T_PCH2)
   4975 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4976 				== 0))
   4977 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4978 
   4979 			reg |= CTRL_PHY_RESET;
   4980 			phy_reset = 1;
   4981 		} else
   4982 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4983 		sc->phy.acquire(sc);
   4984 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4985 		/* Don't insert a completion barrier during reset */
   4986 		delay(20*1000);
   4987 		mutex_exit(sc->sc_ich_phymtx);
   4988 		break;
   4989 	case WM_T_82580:
   4990 	case WM_T_I350:
   4991 	case WM_T_I354:
   4992 	case WM_T_I210:
   4993 	case WM_T_I211:
   4994 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4995 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4996 			CSR_WRITE_FLUSH(sc);
   4997 		delay(5000);
   4998 		break;
   4999 	case WM_T_82542_2_0:
   5000 	case WM_T_82542_2_1:
   5001 	case WM_T_82543:
   5002 	case WM_T_82540:
   5003 	case WM_T_82545:
   5004 	case WM_T_82546:
   5005 	case WM_T_82571:
   5006 	case WM_T_82572:
   5007 	case WM_T_82573:
   5008 	case WM_T_82574:
   5009 	case WM_T_82575:
   5010 	case WM_T_82576:
   5011 	case WM_T_82583:
   5012 	default:
   5013 		/* Everything else can safely use the documented method. */
   5014 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5015 		break;
   5016 	}
   5017 
   5018 	/* Must release the MDIO ownership after MAC reset */
   5019 	switch (sc->sc_type) {
   5020 	case WM_T_82573:
   5021 	case WM_T_82574:
   5022 	case WM_T_82583:
   5023 		if (error == 0)
   5024 			wm_put_hw_semaphore_82573(sc);
   5025 		break;
   5026 	default:
   5027 		break;
   5028 	}
   5029 
   5030 	/* Set Phy Config Counter to 50msec */
   5031 	if (sc->sc_type == WM_T_PCH2) {
   5032 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5033 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5034 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5035 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5036 	}
   5037 
   5038 	if (phy_reset != 0)
   5039 		wm_get_cfg_done(sc);
   5040 
   5041 	/* Reload EEPROM */
   5042 	switch (sc->sc_type) {
   5043 	case WM_T_82542_2_0:
   5044 	case WM_T_82542_2_1:
   5045 	case WM_T_82543:
   5046 	case WM_T_82544:
   5047 		delay(10);
   5048 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5049 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5050 		CSR_WRITE_FLUSH(sc);
   5051 		delay(2000);
   5052 		break;
   5053 	case WM_T_82540:
   5054 	case WM_T_82545:
   5055 	case WM_T_82545_3:
   5056 	case WM_T_82546:
   5057 	case WM_T_82546_3:
   5058 		delay(5*1000);
   5059 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5060 		break;
   5061 	case WM_T_82541:
   5062 	case WM_T_82541_2:
   5063 	case WM_T_82547:
   5064 	case WM_T_82547_2:
   5065 		delay(20000);
   5066 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5067 		break;
   5068 	case WM_T_82571:
   5069 	case WM_T_82572:
   5070 	case WM_T_82573:
   5071 	case WM_T_82574:
   5072 	case WM_T_82583:
   5073 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5074 			delay(10);
   5075 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5076 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5077 			CSR_WRITE_FLUSH(sc);
   5078 		}
   5079 		/* check EECD_EE_AUTORD */
   5080 		wm_get_auto_rd_done(sc);
   5081 		/*
    5082 		 * PHY configuration from the NVM starts just after
    5083 		 * EECD_AUTO_RD is set.
   5084 		 */
   5085 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5086 		    || (sc->sc_type == WM_T_82583))
   5087 			delay(25*1000);
   5088 		break;
   5089 	case WM_T_82575:
   5090 	case WM_T_82576:
   5091 	case WM_T_82580:
   5092 	case WM_T_I350:
   5093 	case WM_T_I354:
   5094 	case WM_T_I210:
   5095 	case WM_T_I211:
   5096 	case WM_T_80003:
   5097 		/* check EECD_EE_AUTORD */
   5098 		wm_get_auto_rd_done(sc);
   5099 		break;
   5100 	case WM_T_ICH8:
   5101 	case WM_T_ICH9:
   5102 	case WM_T_ICH10:
   5103 	case WM_T_PCH:
   5104 	case WM_T_PCH2:
   5105 	case WM_T_PCH_LPT:
   5106 	case WM_T_PCH_SPT:
   5107 	case WM_T_PCH_CNP:
   5108 		break;
   5109 	default:
   5110 		panic("%s: unknown type\n", __func__);
   5111 	}
   5112 
   5113 	/* Check whether EEPROM is present or not */
   5114 	switch (sc->sc_type) {
   5115 	case WM_T_82575:
   5116 	case WM_T_82576:
   5117 	case WM_T_82580:
   5118 	case WM_T_I350:
   5119 	case WM_T_I354:
   5120 	case WM_T_ICH8:
   5121 	case WM_T_ICH9:
   5122 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5123 			/* Not found */
   5124 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5125 			if (sc->sc_type == WM_T_82575)
   5126 				wm_reset_init_script_82575(sc);
   5127 		}
   5128 		break;
   5129 	default:
   5130 		break;
   5131 	}
   5132 
   5133 	if (phy_reset != 0)
   5134 		wm_phy_post_reset(sc);
   5135 
   5136 	if ((sc->sc_type == WM_T_82580)
   5137 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5138 		/* Clear global device reset status bit */
   5139 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5140 	}
   5141 
   5142 	/* Clear any pending interrupt events. */
   5143 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5144 	reg = CSR_READ(sc, WMREG_ICR);
   5145 	if (wm_is_using_msix(sc)) {
   5146 		if (sc->sc_type != WM_T_82574) {
   5147 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5148 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5149 		} else
   5150 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5151 	}
   5152 
   5153 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5154 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5155 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5156 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5157 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5158 		reg |= KABGTXD_BGSQLBIAS;
   5159 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5160 	}
   5161 
   5162 	/* Reload sc_ctrl */
   5163 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5164 
   5165 	wm_set_eee(sc);
   5166 
   5167 	/*
   5168 	 * For PCH, this write will make sure that any noise will be detected
   5169 	 * as a CRC error and be dropped rather than show up as a bad packet
    5170 	 * to the DMA engine.
   5171 	 */
   5172 	if (sc->sc_type == WM_T_PCH)
   5173 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5174 
   5175 	if (sc->sc_type >= WM_T_82544)
   5176 		CSR_WRITE(sc, WMREG_WUC, 0);
   5177 
   5178 	if (sc->sc_type < WM_T_82575)
   5179 		wm_disable_aspm(sc); /* Workaround for some chips */
   5180 
   5181 	wm_reset_mdicnfg_82580(sc);
   5182 
   5183 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5184 		wm_pll_workaround_i210(sc);
   5185 
   5186 	if (sc->sc_type == WM_T_80003) {
   5187 		/* Default to TRUE to enable the MDIC W/A */
   5188 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5189 
   5190 		rv = wm_kmrn_readreg(sc,
   5191 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5192 		if (rv == 0) {
   5193 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5194 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5195 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5196 			else
   5197 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5198 		}
   5199 	}
   5200 }
   5201 
   5202 /*
   5203  * wm_add_rxbuf:
   5204  *
    5205  *	Add a receive buffer to the indicated descriptor.
   5206  */
   5207 static int
   5208 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5209 {
   5210 	struct wm_softc *sc = rxq->rxq_sc;
   5211 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5212 	struct mbuf *m;
   5213 	int error;
   5214 
   5215 	KASSERT(mutex_owned(rxq->rxq_lock));
   5216 
   5217 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5218 	if (m == NULL)
   5219 		return ENOBUFS;
   5220 
   5221 	MCLGET(m, M_DONTWAIT);
   5222 	if ((m->m_flags & M_EXT) == 0) {
   5223 		m_freem(m);
   5224 		return ENOBUFS;
   5225 	}
   5226 
   5227 	if (rxs->rxs_mbuf != NULL)
   5228 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5229 
   5230 	rxs->rxs_mbuf = m;
   5231 
   5232 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5233 	/*
   5234 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5235 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5236 	 */
   5237 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5238 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5239 	if (error) {
   5240 		/* XXX XXX XXX */
   5241 		aprint_error_dev(sc->sc_dev,
   5242 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5243 		panic("wm_add_rxbuf");
   5244 	}
   5245 
   5246 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5247 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5248 
   5249 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5250 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5251 			wm_init_rxdesc(rxq, idx);
   5252 	} else
   5253 		wm_init_rxdesc(rxq, idx);
   5254 
   5255 	return 0;
   5256 }
   5257 
   5258 /*
   5259  * wm_rxdrain:
   5260  *
   5261  *	Drain the receive queue.
   5262  */
   5263 static void
   5264 wm_rxdrain(struct wm_rxqueue *rxq)
   5265 {
   5266 	struct wm_softc *sc = rxq->rxq_sc;
   5267 	struct wm_rxsoft *rxs;
   5268 	int i;
   5269 
   5270 	KASSERT(mutex_owned(rxq->rxq_lock));
   5271 
   5272 	for (i = 0; i < WM_NRXDESC; i++) {
   5273 		rxs = &rxq->rxq_soft[i];
   5274 		if (rxs->rxs_mbuf != NULL) {
   5275 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5276 			m_freem(rxs->rxs_mbuf);
   5277 			rxs->rxs_mbuf = NULL;
   5278 		}
   5279 	}
   5280 }
   5281 
   5282 /*
   5283  * Setup registers for RSS.
   5284  *
   5285  * XXX not yet VMDq support
   5286  */
   5287 static void
   5288 wm_init_rss(struct wm_softc *sc)
   5289 {
   5290 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5291 	int i;
   5292 
   5293 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5294 
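         	/*
         	 * Fill the redirection table: each entry maps an RSS hash
         	 * bucket to a queue, spreading buckets round-robin across
         	 * the queues.
         	 */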
   5295 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5296 		unsigned int qid, reta_ent;
   5297 
   5298 		qid  = i % sc->sc_nqueues;
   5299 		switch (sc->sc_type) {
   5300 		case WM_T_82574:
   5301 			reta_ent = __SHIFTIN(qid,
   5302 			    RETA_ENT_QINDEX_MASK_82574);
   5303 			break;
   5304 		case WM_T_82575:
   5305 			reta_ent = __SHIFTIN(qid,
   5306 			    RETA_ENT_QINDEX1_MASK_82575);
   5307 			break;
   5308 		default:
   5309 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5310 			break;
   5311 		}
   5312 
   5313 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5314 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5315 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5316 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5317 	}
   5318 
   5319 	rss_getkey((uint8_t *)rss_key);
   5320 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5321 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5322 
   5323 	if (sc->sc_type == WM_T_82574)
   5324 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5325 	else
   5326 		mrqc = MRQC_ENABLE_RSS_MQ;
   5327 
   5328 	/*
   5329 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5330 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5331 	 */
   5332 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5333 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5334 #if 0
   5335 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5336 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5337 #endif
   5338 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5339 
   5340 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5341 }
   5342 
   5343 /*
    5344  * Adjust the TX and RX queue numbers which the system actually uses.
    5345  *
    5346  * The numbers are affected by the parameters below.
    5347  *     - The number of hardware queues
   5348  *     - The number of MSI-X vectors (= "nvectors" argument)
   5349  *     - ncpu
   5350  */
   5351 static void
   5352 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5353 {
   5354 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5355 
   5356 	if (nvectors < 2) {
   5357 		sc->sc_nqueues = 1;
   5358 		return;
   5359 	}
   5360 
   5361 	switch (sc->sc_type) {
   5362 	case WM_T_82572:
   5363 		hw_ntxqueues = 2;
   5364 		hw_nrxqueues = 2;
   5365 		break;
   5366 	case WM_T_82574:
   5367 		hw_ntxqueues = 2;
   5368 		hw_nrxqueues = 2;
   5369 		break;
   5370 	case WM_T_82575:
   5371 		hw_ntxqueues = 4;
   5372 		hw_nrxqueues = 4;
   5373 		break;
   5374 	case WM_T_82576:
   5375 		hw_ntxqueues = 16;
   5376 		hw_nrxqueues = 16;
   5377 		break;
   5378 	case WM_T_82580:
   5379 	case WM_T_I350:
   5380 	case WM_T_I354:
   5381 		hw_ntxqueues = 8;
   5382 		hw_nrxqueues = 8;
   5383 		break;
   5384 	case WM_T_I210:
   5385 		hw_ntxqueues = 4;
   5386 		hw_nrxqueues = 4;
   5387 		break;
   5388 	case WM_T_I211:
   5389 		hw_ntxqueues = 2;
   5390 		hw_nrxqueues = 2;
   5391 		break;
   5392 		/*
    5393 		 * As the ethernet controllers below do not support MSI-X,
    5394 		 * this driver does not use multiqueue for them.
   5395 		 *     - WM_T_80003
   5396 		 *     - WM_T_ICH8
   5397 		 *     - WM_T_ICH9
   5398 		 *     - WM_T_ICH10
   5399 		 *     - WM_T_PCH
   5400 		 *     - WM_T_PCH2
   5401 		 *     - WM_T_PCH_LPT
   5402 		 */
   5403 	default:
   5404 		hw_ntxqueues = 1;
   5405 		hw_nrxqueues = 1;
   5406 		break;
   5407 	}
   5408 
   5409 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5410 
   5411 	/*
    5412 	 * As more queues than MSI-X vectors cannot improve scaling, we
    5413 	 * limit the number of queues actually used.
   5414 	 */
   5415 	if (nvectors < hw_nqueues + 1)
   5416 		sc->sc_nqueues = nvectors - 1;
   5417 	else
   5418 		sc->sc_nqueues = hw_nqueues;
   5419 
   5420 	/*
    5421 	 * As more queues than CPUs cannot improve scaling, we limit
    5422 	 * the number of queues actually used.
   5423 	 */
   5424 	if (ncpu < sc->sc_nqueues)
   5425 		sc->sc_nqueues = ncpu;
   5426 }
   5427 
   5428 static inline bool
   5429 wm_is_using_msix(struct wm_softc *sc)
   5430 {
   5431 
   5432 	return (sc->sc_nintrs > 1);
   5433 }
   5434 
   5435 static inline bool
   5436 wm_is_using_multiqueue(struct wm_softc *sc)
   5437 {
   5438 
   5439 	return (sc->sc_nqueues > 1);
   5440 }
   5441 
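         /*
          * wm_softint_establish:
          *
          *	Establish the per-queue software interrupt which runs
          *	wm_handle_queue() for deferred TX/RX processing.
          */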
   5442 static int
   5443 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5444 {
   5445 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5446 
   5447 	wmq->wmq_id = qidx;
   5448 	wmq->wmq_intr_idx = intr_idx;
   5449 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5450 #ifdef WM_MPSAFE
   5451 	    | SOFTINT_MPSAFE
   5452 #endif
   5453 	    , wm_handle_queue, wmq);
   5454 	if (wmq->wmq_si != NULL)
   5455 		return 0;
   5456 
   5457 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5458 	    wmq->wmq_id);
   5459 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5460 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5461 	return ENOMEM;
   5462 }
   5463 
   5464 /*
   5465  * Both single interrupt MSI and INTx can use this function.
   5466  */
   5467 static int
   5468 wm_setup_legacy(struct wm_softc *sc)
   5469 {
   5470 	pci_chipset_tag_t pc = sc->sc_pc;
   5471 	const char *intrstr = NULL;
   5472 	char intrbuf[PCI_INTRSTR_LEN];
   5473 	int error;
   5474 
   5475 	error = wm_alloc_txrx_queues(sc);
   5476 	if (error) {
   5477 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5478 		    error);
   5479 		return ENOMEM;
   5480 	}
   5481 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5482 	    sizeof(intrbuf));
   5483 #ifdef WM_MPSAFE
   5484 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5485 #endif
   5486 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5487 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5488 	if (sc->sc_ihs[0] == NULL) {
   5489 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5490 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5491 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5492 		return ENOMEM;
   5493 	}
   5494 
   5495 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5496 	sc->sc_nintrs = 1;
   5497 
   5498 	return wm_softint_establish(sc, 0, 0);
   5499 }
   5500 
   5501 static int
   5502 wm_setup_msix(struct wm_softc *sc)
   5503 {
   5504 	void *vih;
   5505 	kcpuset_t *affinity;
   5506 	int qidx, error, intr_idx, txrx_established;
   5507 	pci_chipset_tag_t pc = sc->sc_pc;
   5508 	const char *intrstr = NULL;
   5509 	char intrbuf[PCI_INTRSTR_LEN];
   5510 	char intr_xname[INTRDEVNAMEBUF];
   5511 
   5512 	if (sc->sc_nqueues < ncpu) {
   5513 		/*
   5514 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5515 		 * interrupts starts from CPU#1.
   5516 		 */
   5517 		sc->sc_affinity_offset = 1;
   5518 	} else {
   5519 		/*
    5520 		 * In this case, this device uses all CPUs, so we unify the
    5521 		 * affinity cpu_index with the MSI-X vector number for readability.
   5522 		 */
   5523 		sc->sc_affinity_offset = 0;
   5524 	}
   5525 
   5526 	error = wm_alloc_txrx_queues(sc);
   5527 	if (error) {
   5528 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5529 		    error);
   5530 		return ENOMEM;
   5531 	}
   5532 
   5533 	kcpuset_create(&affinity, false);
   5534 	intr_idx = 0;
   5535 
   5536 	/*
    5537 	 * TX and RX: one MSI-X vector per queue pair, starting at vector 0
   5538 	 */
   5539 	txrx_established = 0;
   5540 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5541 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5542 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5543 
   5544 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5545 		    sizeof(intrbuf));
   5546 #ifdef WM_MPSAFE
   5547 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5548 		    PCI_INTR_MPSAFE, true);
   5549 #endif
   5550 		memset(intr_xname, 0, sizeof(intr_xname));
   5551 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5552 		    device_xname(sc->sc_dev), qidx);
   5553 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5554 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5555 		if (vih == NULL) {
   5556 			aprint_error_dev(sc->sc_dev,
   5557 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5558 			    intrstr ? " at " : "",
   5559 			    intrstr ? intrstr : "");
   5560 
   5561 			goto fail;
   5562 		}
   5563 		kcpuset_zero(affinity);
   5564 		/* Round-robin affinity */
   5565 		kcpuset_set(affinity, affinity_to);
   5566 		error = interrupt_distribute(vih, affinity, NULL);
   5567 		if (error == 0) {
   5568 			aprint_normal_dev(sc->sc_dev,
   5569 			    "for TX and RX interrupting at %s affinity to %u\n",
   5570 			    intrstr, affinity_to);
   5571 		} else {
   5572 			aprint_normal_dev(sc->sc_dev,
   5573 			    "for TX and RX interrupting at %s\n", intrstr);
   5574 		}
   5575 		sc->sc_ihs[intr_idx] = vih;
   5576 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5577 			goto fail;
   5578 		txrx_established++;
   5579 		intr_idx++;
   5580 	}
   5581 
   5582 	/* LINK */
   5583 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5584 	    sizeof(intrbuf));
   5585 #ifdef WM_MPSAFE
   5586 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5587 #endif
   5588 	memset(intr_xname, 0, sizeof(intr_xname));
   5589 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5590 	    device_xname(sc->sc_dev));
   5591 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5592 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5593 	if (vih == NULL) {
   5594 		aprint_error_dev(sc->sc_dev,
   5595 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5596 		    intrstr ? " at " : "",
   5597 		    intrstr ? intrstr : "");
   5598 
   5599 		goto fail;
   5600 	}
   5601 	/* Keep default affinity to LINK interrupt */
   5602 	aprint_normal_dev(sc->sc_dev,
   5603 	    "for LINK interrupting at %s\n", intrstr);
   5604 	sc->sc_ihs[intr_idx] = vih;
   5605 	sc->sc_link_intr_idx = intr_idx;
   5606 
   5607 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5608 	kcpuset_destroy(affinity);
   5609 	return 0;
   5610 
   5611  fail:
   5612 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5613 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5614 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5615 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5616 	}
   5617 
   5618 	kcpuset_destroy(affinity);
   5619 	return ENOMEM;
   5620 }
   5621 
   5622 static void
   5623 wm_unset_stopping_flags(struct wm_softc *sc)
   5624 {
   5625 	int i;
   5626 
   5627 	KASSERT(WM_CORE_LOCKED(sc));
   5628 
   5629 	/* Must unset stopping flags in ascending order. */
   5630 	for (i = 0; i < sc->sc_nqueues; i++) {
   5631 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5632 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5633 
   5634 		mutex_enter(txq->txq_lock);
   5635 		txq->txq_stopping = false;
   5636 		mutex_exit(txq->txq_lock);
   5637 
   5638 		mutex_enter(rxq->rxq_lock);
   5639 		rxq->rxq_stopping = false;
   5640 		mutex_exit(rxq->rxq_lock);
   5641 	}
   5642 
   5643 	sc->sc_core_stopping = false;
   5644 }
   5645 
   5646 static void
   5647 wm_set_stopping_flags(struct wm_softc *sc)
   5648 {
   5649 	int i;
   5650 
   5651 	KASSERT(WM_CORE_LOCKED(sc));
   5652 
   5653 	sc->sc_core_stopping = true;
   5654 
   5655 	/* Must set stopping flags in ascending order. */
   5656 	for (i = 0; i < sc->sc_nqueues; i++) {
   5657 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5658 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5659 
   5660 		mutex_enter(rxq->rxq_lock);
   5661 		rxq->rxq_stopping = true;
   5662 		mutex_exit(rxq->rxq_lock);
   5663 
   5664 		mutex_enter(txq->txq_lock);
   5665 		txq->txq_stopping = true;
   5666 		mutex_exit(txq->txq_lock);
   5667 	}
   5668 }
   5669 
   5670 /*
   5671  * Write interrupt interval value to ITR or EITR
   5672  */
   5673 static void
   5674 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5675 {
   5676 
   5677 	if (!wmq->wmq_set_itr)
   5678 		return;
   5679 
   5680 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5681 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5682 
   5683 		/*
   5684 		 * 82575 doesn't have CNT_INGR field.
    5685 		 * So, overwrite the counter field in software.
   5686 		 */
   5687 		if (sc->sc_type == WM_T_82575)
   5688 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5689 		else
   5690 			eitr |= EITR_CNT_INGR;
   5691 
   5692 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5693 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5694 		/*
    5695 		 * 82574 has both ITR and EITR. Set EITR when we use
    5696 		 * the multiqueue function with MSI-X.
   5697 		 */
   5698 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5699 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5700 	} else {
   5701 		KASSERT(wmq->wmq_id == 0);
   5702 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5703 	}
   5704 
   5705 	wmq->wmq_set_itr = false;
   5706 }
   5707 
   5708 /*
   5709  * TODO
    5710  * The dynamic itr calculation below is almost the same as Linux igb's;
    5711  * however, it does not fit wm(4). So, AIM is disabled until we find
    5712  * an appropriate itr calculation.
   5713  */
   5714 /*
    5715  * Calculate the interrupt interval value to be written to the register
    5716  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5717  */
   5718 static void
   5719 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5720 {
   5721 #ifdef NOTYET
   5722 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5723 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5724 	uint32_t avg_size = 0;
   5725 	uint32_t new_itr;
   5726 
   5727 	if (rxq->rxq_packets)
   5728 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5729 	if (txq->txq_packets)
   5730 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5731 
   5732 	if (avg_size == 0) {
   5733 		new_itr = 450; /* restore default value */
   5734 		goto out;
   5735 	}
   5736 
   5737 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5738 	avg_size += 24;
   5739 
   5740 	/* Don't starve jumbo frames */
   5741 	avg_size = uimin(avg_size, 3000);
   5742 
   5743 	/* Give a little boost to mid-size frames */
   5744 	if ((avg_size > 300) && (avg_size < 1200))
   5745 		new_itr = avg_size / 3;
   5746 	else
   5747 		new_itr = avg_size / 2;
   5748 
   5749 out:
   5750 	/*
    5751 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5752 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5753 	 */
   5754 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5755 		new_itr *= 4;
   5756 
   5757 	if (new_itr != wmq->wmq_itr) {
   5758 		wmq->wmq_itr = new_itr;
   5759 		wmq->wmq_set_itr = true;
   5760 	} else
   5761 		wmq->wmq_set_itr = false;
   5762 
   5763 	rxq->rxq_packets = 0;
   5764 	rxq->rxq_bytes = 0;
   5765 	txq->txq_packets = 0;
   5766 	txq->txq_bytes = 0;
   5767 #endif
   5768 }
   5769 
   5770 static void
   5771 wm_init_sysctls(struct wm_softc *sc)
   5772 {
   5773 	struct sysctllog **log;
   5774 	const struct sysctlnode *rnode, *cnode;
   5775 	int rv;
   5776 	const char *dvname;
   5777 
   5778 	log = &sc->sc_sysctllog;
   5779 	dvname = device_xname(sc->sc_dev);
   5780 
   5781 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5782 	    0, CTLTYPE_NODE, dvname,
   5783 	    SYSCTL_DESCR("wm information and settings"),
   5784 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5785 	if (rv != 0)
   5786 		goto err;
   5787 
   5788 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
    5789 	    CTLTYPE_BOOL, "txrx_workqueue",
         	    SYSCTL_DESCR("Use workqueue for packet processing"),
   5790 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5791 	if (rv != 0)
   5792 		goto teardown;
   5793 
   5794 	return;
   5795 
   5796 teardown:
   5797 	sysctl_teardown(log);
   5798 err:
   5799 	sc->sc_sysctllog = NULL;
   5800 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5801 	    __func__, rv);
   5802 }
   5803 
   5804 /*
   5805  * wm_init:		[ifnet interface function]
   5806  *
   5807  *	Initialize the interface.
   5808  */
   5809 static int
   5810 wm_init(struct ifnet *ifp)
   5811 {
   5812 	struct wm_softc *sc = ifp->if_softc;
   5813 	int ret;
   5814 
   5815 	WM_CORE_LOCK(sc);
   5816 	ret = wm_init_locked(ifp);
   5817 	WM_CORE_UNLOCK(sc);
   5818 
   5819 	return ret;
   5820 }
   5821 
   5822 static int
   5823 wm_init_locked(struct ifnet *ifp)
   5824 {
   5825 	struct wm_softc *sc = ifp->if_softc;
   5826 	struct ethercom *ec = &sc->sc_ethercom;
   5827 	int i, j, trynum, error = 0;
   5828 	uint32_t reg, sfp_mask = 0;
   5829 
   5830 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5831 		device_xname(sc->sc_dev), __func__));
   5832 	KASSERT(WM_CORE_LOCKED(sc));
   5833 
   5834 	/*
    5835 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5836 	 * There is a small but measurable benefit to avoiding the adjustment
    5837 	 * of the descriptor so that the headers are aligned, for normal MTU,
   5838 	 * on such platforms.  One possibility is that the DMA itself is
   5839 	 * slightly more efficient if the front of the entire packet (instead
   5840 	 * of the front of the headers) is aligned.
   5841 	 *
   5842 	 * Note we must always set align_tweak to 0 if we are using
   5843 	 * jumbo frames.
   5844 	 */
   5845 #ifdef __NO_STRICT_ALIGNMENT
   5846 	sc->sc_align_tweak = 0;
   5847 #else
   5848 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5849 		sc->sc_align_tweak = 0;
   5850 	else
   5851 		sc->sc_align_tweak = 2;
   5852 #endif /* __NO_STRICT_ALIGNMENT */
   5853 
   5854 	/* Cancel any pending I/O. */
   5855 	wm_stop_locked(ifp, false, false);
   5856 
   5857 	/* Update statistics before reset */
   5858 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5859 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5860 
   5861 	/* PCH_SPT hardware workaround */
   5862 	if (sc->sc_type == WM_T_PCH_SPT)
   5863 		wm_flush_desc_rings(sc);
   5864 
   5865 	/* Reset the chip to a known state. */
   5866 	wm_reset(sc);
   5867 
   5868 	/*
   5869 	 * AMT based hardware can now take control from firmware
   5870 	 * Do this after reset.
   5871 	 */
   5872 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5873 		wm_get_hw_control(sc);
   5874 
   5875 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5876 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5877 		wm_legacy_irq_quirk_spt(sc);
   5878 
   5879 	/* Init hardware bits */
   5880 	wm_initialize_hardware_bits(sc);
   5881 
   5882 	/* Reset the PHY. */
   5883 	if (sc->sc_flags & WM_F_HAS_MII)
   5884 		wm_gmii_reset(sc);
   5885 
   5886 	if (sc->sc_type >= WM_T_ICH8) {
   5887 		reg = CSR_READ(sc, WMREG_GCR);
   5888 		/*
   5889 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5890 		 * default after reset.
   5891 		 */
   5892 		if (sc->sc_type == WM_T_ICH8)
   5893 			reg |= GCR_NO_SNOOP_ALL;
   5894 		else
   5895 			reg &= ~GCR_NO_SNOOP_ALL;
   5896 		CSR_WRITE(sc, WMREG_GCR, reg);
   5897 	}
   5898 	if ((sc->sc_type >= WM_T_ICH8)
   5899 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5900 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5901 
   5902 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5903 		reg |= CTRL_EXT_RO_DIS;
   5904 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5905 	}
   5906 
   5907 	/* Calculate (E)ITR value */
   5908 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5909 		/*
   5910 		 * For NEWQUEUE's EITR (except for 82575).
    5911 		 * 82575's EITR should be set to the same throttling value
    5912 		 * as other old controllers' ITR because the interrupt/sec
    5913 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5914 		 *
    5915 		 * 82574's EITR should be set to the same throttling value as ITR.
    5916 		 *
    5917 		 * For N interrupts/sec, set this value to:
    5918 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5919 		 */
    5920 		sc->sc_itr_init = 450;	/* 1,000,000 / 450 = ~2222 ints/sec */
   5921 	} else if (sc->sc_type >= WM_T_82543) {
   5922 		/*
   5923 		 * Set up the interrupt throttling register (units of 256ns)
   5924 		 * Note that a footnote in Intel's documentation says this
   5925 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5926 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5927 		 * that that is also true for the 1024ns units of the other
   5928 		 * interrupt-related timer registers -- so, really, we ought
   5929 		 * to divide this value by 4 when the link speed is low.
   5930 		 *
   5931 		 * XXX implement this division at link speed change!
   5932 		 */
   5933 
   5934 		/*
   5935 		 * For N interrupts/sec, set this value to:
   5936 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5937 		 * absolute and packet timer values to this value
   5938 		 * divided by 4 to get "simple timer" behavior.
   5939 		 */
   5940 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5941 	}
   5942 
   5943 	error = wm_init_txrx_queues(sc);
   5944 	if (error)
   5945 		goto out;
   5946 
   5947 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5948 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5949 	    (sc->sc_type >= WM_T_82575))
   5950 		wm_serdes_power_up_link_82575(sc);
   5951 
   5952 	/* Clear out the VLAN table -- we don't use it (yet). */
   5953 	CSR_WRITE(sc, WMREG_VET, 0);
   5954 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5955 		trynum = 10; /* Due to hw errata */
   5956 	else
   5957 		trynum = 1;
   5958 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5959 		for (j = 0; j < trynum; j++)
   5960 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5961 
   5962 	/*
   5963 	 * Set up flow-control parameters.
   5964 	 *
   5965 	 * XXX Values could probably stand some tuning.
   5966 	 */
   5967 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5968 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5969 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5970 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5971 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5972 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5973 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5974 	}
   5975 
   5976 	sc->sc_fcrtl = FCRTL_DFLT;
   5977 	if (sc->sc_type < WM_T_82543) {
   5978 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5979 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5980 	} else {
   5981 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5982 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5983 	}
   5984 
   5985 	if (sc->sc_type == WM_T_80003)
   5986 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5987 	else
   5988 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5989 
   5990 	/* Writes the control register. */
   5991 	wm_set_vlan(sc);
   5992 
   5993 	if (sc->sc_flags & WM_F_HAS_MII) {
   5994 		uint16_t kmreg;
   5995 
   5996 		switch (sc->sc_type) {
   5997 		case WM_T_80003:
   5998 		case WM_T_ICH8:
   5999 		case WM_T_ICH9:
   6000 		case WM_T_ICH10:
   6001 		case WM_T_PCH:
   6002 		case WM_T_PCH2:
   6003 		case WM_T_PCH_LPT:
   6004 		case WM_T_PCH_SPT:
   6005 		case WM_T_PCH_CNP:
   6006 			/*
   6007 			 * Set the mac to wait the maximum time between each
   6008 			 * iteration and increase the max iterations when
   6009 			 * polling the phy; this fixes erroneous timeouts at
   6010 			 * 10Mbps.
   6011 			 */
   6012 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6013 			    0xFFFF);
   6014 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6015 			    &kmreg);
   6016 			kmreg |= 0x3F;
   6017 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6018 			    kmreg);
   6019 			break;
   6020 		default:
   6021 			break;
   6022 		}
   6023 
   6024 		if (sc->sc_type == WM_T_80003) {
   6025 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6026 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6027 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6028 
   6029 			/* Bypass RX and TX FIFO's */
   6030 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6031 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6032 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6033 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6034 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6035 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6036 		}
   6037 	}
   6038 #if 0
   6039 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6040 #endif
   6041 
   6042 	/* Set up checksum offload parameters. */
   6043 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6044 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6045 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6046 		reg |= RXCSUM_IPOFL;
   6047 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6048 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6049 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6050 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6051 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6052 
   6053 	/* Set registers about MSI-X */
   6054 	if (wm_is_using_msix(sc)) {
   6055 		uint32_t ivar, qintr_idx;
   6056 		struct wm_queue *wmq;
   6057 		unsigned int qid;
   6058 
   6059 		if (sc->sc_type == WM_T_82575) {
   6060 			/* Interrupt control */
   6061 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6062 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6063 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6064 
   6065 			/* TX and RX */
   6066 			for (i = 0; i < sc->sc_nqueues; i++) {
   6067 				wmq = &sc->sc_queue[i];
   6068 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6069 				    EITR_TX_QUEUE(wmq->wmq_id)
   6070 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6071 			}
   6072 			/* Link status */
   6073 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6074 			    EITR_OTHER);
   6075 		} else if (sc->sc_type == WM_T_82574) {
   6076 			/* Interrupt control */
   6077 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6078 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6079 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6080 
   6081 			/*
    6082 			 * Work around an issue with spurious interrupts
    6083 			 * in MSI-X mode.
    6084 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6085 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   6086 			 */
   6087 			reg = CSR_READ(sc, WMREG_RFCTL);
   6088 			reg |= WMREG_RFCTL_ACKDIS;
   6089 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6090 
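         			/*
         			 * Each IVAR field selects the MSI-X vector to
         			 * which a queue's TX or RX cause is routed.
         			 */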
   6091 			ivar = 0;
   6092 			/* TX and RX */
   6093 			for (i = 0; i < sc->sc_nqueues; i++) {
   6094 				wmq = &sc->sc_queue[i];
   6095 				qid = wmq->wmq_id;
   6096 				qintr_idx = wmq->wmq_intr_idx;
   6097 
   6098 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6099 				    IVAR_TX_MASK_Q_82574(qid));
   6100 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6101 				    IVAR_RX_MASK_Q_82574(qid));
   6102 			}
   6103 			/* Link status */
   6104 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6105 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6106 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6107 		} else {
   6108 			/* Interrupt control */
   6109 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6110 			    | GPIE_EIAME | GPIE_PBA);
   6111 
   6112 			switch (sc->sc_type) {
   6113 			case WM_T_82580:
   6114 			case WM_T_I350:
   6115 			case WM_T_I354:
   6116 			case WM_T_I210:
   6117 			case WM_T_I211:
   6118 				/* TX and RX */
   6119 				for (i = 0; i < sc->sc_nqueues; i++) {
   6120 					wmq = &sc->sc_queue[i];
   6121 					qid = wmq->wmq_id;
   6122 					qintr_idx = wmq->wmq_intr_idx;
   6123 
   6124 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6125 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6126 					ivar |= __SHIFTIN((qintr_idx
   6127 						| IVAR_VALID),
   6128 					    IVAR_TX_MASK_Q(qid));
   6129 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6130 					ivar |= __SHIFTIN((qintr_idx
   6131 						| IVAR_VALID),
   6132 					    IVAR_RX_MASK_Q(qid));
   6133 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6134 				}
   6135 				break;
   6136 			case WM_T_82576:
   6137 				/* TX and RX */
   6138 				for (i = 0; i < sc->sc_nqueues; i++) {
   6139 					wmq = &sc->sc_queue[i];
   6140 					qid = wmq->wmq_id;
   6141 					qintr_idx = wmq->wmq_intr_idx;
   6142 
   6143 					ivar = CSR_READ(sc,
   6144 					    WMREG_IVAR_Q_82576(qid));
   6145 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6146 					ivar |= __SHIFTIN((qintr_idx
   6147 						| IVAR_VALID),
   6148 					    IVAR_TX_MASK_Q_82576(qid));
   6149 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6150 					ivar |= __SHIFTIN((qintr_idx
   6151 						| IVAR_VALID),
   6152 					    IVAR_RX_MASK_Q_82576(qid));
   6153 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6154 					    ivar);
   6155 				}
   6156 				break;
   6157 			default:
   6158 				break;
   6159 			}
   6160 
   6161 			/* Link status */
   6162 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6163 			    IVAR_MISC_OTHER);
   6164 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6165 		}
   6166 
   6167 		if (wm_is_using_multiqueue(sc)) {
   6168 			wm_init_rss(sc);
   6169 
			/*
			 * NOTE: Receive Full-Packet Checksum Offload is
			 * mutually exclusive with Multiqueue. However, this
			 * is not the same as the per-protocol TCP/IP
			 * checksums, which still work.
			 */
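			/*
			 * (A note, assuming the usual Intel RSS layout:
			 * with RXCSUM_PCSD set, the Rx descriptor field
			 * that would hold the full-packet checksum carries
			 * the RSS hash instead, which is why the two
			 * features are mutually exclusive.)
			 */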
   6176 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6177 			reg |= RXCSUM_PCSD;
   6178 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6179 		}
   6180 	}
   6181 
   6182 	/* Set up the interrupt registers. */
   6183 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6184 
   6185 	/* Enable SFP module insertion interrupt if it's required */
   6186 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6187 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6188 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6189 		sfp_mask = ICR_GPI(0);
   6190 	}
   6191 
   6192 	if (wm_is_using_msix(sc)) {
   6193 		uint32_t mask;
   6194 		struct wm_queue *wmq;
   6195 
   6196 		switch (sc->sc_type) {
   6197 		case WM_T_82574:
   6198 			mask = 0;
   6199 			for (i = 0; i < sc->sc_nqueues; i++) {
   6200 				wmq = &sc->sc_queue[i];
   6201 				mask |= ICR_TXQ(wmq->wmq_id);
   6202 				mask |= ICR_RXQ(wmq->wmq_id);
   6203 			}
   6204 			mask |= ICR_OTHER;
   6205 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6206 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6207 			break;
   6208 		default:
   6209 			if (sc->sc_type == WM_T_82575) {
   6210 				mask = 0;
   6211 				for (i = 0; i < sc->sc_nqueues; i++) {
   6212 					wmq = &sc->sc_queue[i];
   6213 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6214 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6215 				}
   6216 				mask |= EITR_OTHER;
   6217 			} else {
   6218 				mask = 0;
   6219 				for (i = 0; i < sc->sc_nqueues; i++) {
   6220 					wmq = &sc->sc_queue[i];
   6221 					mask |= 1 << wmq->wmq_intr_idx;
   6222 				}
   6223 				mask |= 1 << sc->sc_link_intr_idx;
   6224 			}
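			/*
			 * EIAC selects which causes are auto-cleared when
			 * their interrupt fires, EIAM which are auto-masked,
			 * and EIMS enables them.
			 */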
   6225 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6226 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6227 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6228 
   6229 			/* For other interrupts */
   6230 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6231 			break;
   6232 		}
   6233 	} else {
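		/*
		 * Legacy/MSI mode: interrupt on Tx descriptor write-back,
		 * link status change, Rx sequence error, Rx descriptor
		 * minimum threshold, Rx overrun and the Rx timer, plus the
		 * SFP insertion GPI if configured above.
		 */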
   6234 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6235 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6236 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6237 	}
   6238 
   6239 	/* Set up the inter-packet gap. */
   6240 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6241 
   6242 	if (sc->sc_type >= WM_T_82543) {
   6243 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6244 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6245 			wm_itrs_writereg(sc, wmq);
   6246 		}
		/*
		 * Link interrupts occur much less frequently than TX
		 * and RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
		 * if_igb does.
		 */
   6253 	}
   6254 
   6255 	/* Set the VLAN ethernetype. */
   6256 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6257 
   6258 	/*
   6259 	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
   6262 	 */
   6263 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6264 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6265 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6266 	if (sc->sc_type >= WM_T_82571)
   6267 		sc->sc_tctl |= TCTL_MULR;
   6268 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6269 
   6270 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6271 		/* Write TDT after TCTL.EN is set. See the document. */
   6272 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6273 	}
   6274 
   6275 	if (sc->sc_type == WM_T_80003) {
   6276 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6277 		reg &= ~TCTL_EXT_GCEX_MASK;
   6278 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6279 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6280 	}
   6281 
   6282 	/* Set the media. */
   6283 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6284 		goto out;
   6285 
   6286 	/* Configure for OS presence */
   6287 	wm_init_manageability(sc);
   6288 
   6289 	/*
   6290 	 * Set up the receive control register; we actually program the
   6291 	 * register when we set the receive filter. Use multicast address
   6292 	 * offset type 0.
   6293 	 *
   6294 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6295 	 * don't enable that feature.
   6296 	 */
   6297 	sc->sc_mchash_type = 0;
   6298 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6299 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6300 
	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   6302 	if (sc->sc_type == WM_T_82574)
   6303 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6304 
	/*
	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not, so ask for a stripped CRC here and cope
	 * with it in rxeof().
	 */
   6309 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6310 	    || (sc->sc_type == WM_T_I210))
   6311 		sc->sc_rctl |= RCTL_SECRC;
   6312 
   6313 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6314 	    && (ifp->if_mtu > ETHERMTU)) {
   6315 		sc->sc_rctl |= RCTL_LPE;
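		/*
		 * On the newer queue hardware, RLPML (written below) caps
		 * the maximum accepted long-packet length.
		 */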
   6316 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6317 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6318 	}
   6319 
   6320 	if (MCLBYTES == 2048)
   6321 		sc->sc_rctl |= RCTL_2k;
   6322 	else {
   6323 		if (sc->sc_type >= WM_T_82543) {
   6324 			switch (MCLBYTES) {
   6325 			case 4096:
   6326 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6327 				break;
   6328 			case 8192:
   6329 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6330 				break;
   6331 			case 16384:
   6332 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6333 				break;
   6334 			default:
   6335 				panic("wm_init: MCLBYTES %d unsupported",
   6336 				    MCLBYTES);
   6337 				break;
   6338 			}
   6339 		} else
   6340 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6341 	}
   6342 
   6343 	/* Enable ECC */
   6344 	switch (sc->sc_type) {
   6345 	case WM_T_82571:
   6346 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6347 		reg |= PBA_ECC_CORR_EN;
   6348 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6349 		break;
   6350 	case WM_T_PCH_LPT:
   6351 	case WM_T_PCH_SPT:
   6352 	case WM_T_PCH_CNP:
   6353 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6354 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6355 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6356 
   6357 		sc->sc_ctrl |= CTRL_MEHE;
   6358 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6359 		break;
   6360 	default:
   6361 		break;
   6362 	}
   6363 
   6364 	/*
   6365 	 * Set the receive filter.
   6366 	 *
   6367 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6368 	 * the setting of RCTL.EN in wm_set_filter()
   6369 	 */
   6370 	wm_set_filter(sc);
   6371 
	/* On 82575 and later, set RDT only if RX is enabled. */
   6373 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6374 		int qidx;
   6375 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6376 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6377 			for (i = 0; i < WM_NRXDESC; i++) {
   6378 				mutex_enter(rxq->rxq_lock);
   6379 				wm_init_rxdesc(rxq, i);
   6380 				mutex_exit(rxq->rxq_lock);
			}
   6383 		}
   6384 	}
   6385 
   6386 	wm_unset_stopping_flags(sc);
   6387 
   6388 	/* Start the one second link check clock. */
   6389 	callout_schedule(&sc->sc_tick_ch, hz);
   6390 
   6391 	/* ...all done! */
   6392 	ifp->if_flags |= IFF_RUNNING;
   6393 
   6394  out:
   6395 	/* Save last flags for the callback */
   6396 	sc->sc_if_flags = ifp->if_flags;
   6397 	sc->sc_ec_capenable = ec->ec_capenable;
   6398 	if (error)
   6399 		log(LOG_ERR, "%s: interface not running\n",
   6400 		    device_xname(sc->sc_dev));
   6401 	return error;
   6402 }
   6403 
   6404 /*
   6405  * wm_stop:		[ifnet interface function]
   6406  *
   6407  *	Stop transmission on the interface.
   6408  */
   6409 static void
   6410 wm_stop(struct ifnet *ifp, int disable)
   6411 {
   6412 	struct wm_softc *sc = ifp->if_softc;
   6413 
   6414 	ASSERT_SLEEPABLE();
   6415 
   6416 	WM_CORE_LOCK(sc);
	wm_stop_locked(ifp, disable != 0, true);
   6418 	WM_CORE_UNLOCK(sc);
   6419 
	/*
	 * After wm_set_stopping_flags(), it is guaranteed that
	 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in
	 * wm_stop_locked() because it can sleep, so call
	 * workqueue_wait() here.
	 */
   6427 	for (int i = 0; i < sc->sc_nqueues; i++)
   6428 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6429 }
   6430 
   6431 static void
   6432 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6433 {
   6434 	struct wm_softc *sc = ifp->if_softc;
   6435 	struct wm_txsoft *txs;
   6436 	int i, qidx;
   6437 
   6438 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6439 		device_xname(sc->sc_dev), __func__));
   6440 	KASSERT(WM_CORE_LOCKED(sc));
   6441 
   6442 	wm_set_stopping_flags(sc);
   6443 
   6444 	if (sc->sc_flags & WM_F_HAS_MII) {
   6445 		/* Down the MII. */
   6446 		mii_down(&sc->sc_mii);
   6447 	} else {
   6448 #if 0
   6449 		/* Should we clear PHY's status properly? */
   6450 		wm_reset(sc);
   6451 #endif
   6452 	}
   6453 
   6454 	/* Stop the transmit and receive processes. */
   6455 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6456 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6457 	sc->sc_rctl &= ~RCTL_EN;
   6458 
   6459 	/*
   6460 	 * Clear the interrupt mask to ensure the device cannot assert its
   6461 	 * interrupt line.
   6462 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6463 	 * service any currently pending or shared interrupt.
   6464 	 */
   6465 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6466 	sc->sc_icr = 0;
   6467 	if (wm_is_using_msix(sc)) {
   6468 		if (sc->sc_type != WM_T_82574) {
   6469 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6470 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6471 		} else
   6472 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6473 	}
   6474 
   6475 	/*
   6476 	 * Stop callouts after interrupts are disabled; if we have
   6477 	 * to wait for them, we will be releasing the CORE_LOCK
   6478 	 * briefly, which will unblock interrupts on the current CPU.
   6479 	 */
   6480 
   6481 	/* Stop the one second clock. */
   6482 	if (wait)
   6483 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6484 	else
   6485 		callout_stop(&sc->sc_tick_ch);
   6486 
   6487 	/* Stop the 82547 Tx FIFO stall check timer. */
   6488 	if (sc->sc_type == WM_T_82547) {
   6489 		if (wait)
   6490 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6491 		else
   6492 			callout_stop(&sc->sc_txfifo_ch);
   6493 	}
   6494 
   6495 	/* Release any queued transmit buffers. */
   6496 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6497 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6498 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6499 		mutex_enter(txq->txq_lock);
   6500 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6501 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6502 			txs = &txq->txq_soft[i];
   6503 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
   6505 				m_freem(txs->txs_mbuf);
   6506 				txs->txs_mbuf = NULL;
   6507 			}
   6508 		}
   6509 		mutex_exit(txq->txq_lock);
   6510 	}
   6511 
   6512 	/* Mark the interface as down and cancel the watchdog timer. */
   6513 	ifp->if_flags &= ~IFF_RUNNING;
   6514 
   6515 	if (disable) {
   6516 		for (i = 0; i < sc->sc_nqueues; i++) {
   6517 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6518 			mutex_enter(rxq->rxq_lock);
   6519 			wm_rxdrain(rxq);
   6520 			mutex_exit(rxq->rxq_lock);
   6521 		}
   6522 	}
   6523 
   6524 #if 0 /* notyet */
   6525 	if (sc->sc_type >= WM_T_82544)
   6526 		CSR_WRITE(sc, WMREG_WUC, 0);
   6527 #endif
   6528 }
   6529 
   6530 static void
   6531 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6532 {
   6533 	struct mbuf *m;
   6534 	int i;
   6535 
   6536 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6537 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6538 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6539 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6540 		    m->m_data, m->m_len, m->m_flags);
   6541 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6542 	    i, i == 1 ? "" : "s");
   6543 }
   6544 
   6545 /*
   6546  * wm_82547_txfifo_stall:
   6547  *
   6548  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6549  *	reset the FIFO pointers, and restart packet transmission.
   6550  */
   6551 static void
   6552 wm_82547_txfifo_stall(void *arg)
   6553 {
   6554 	struct wm_softc *sc = arg;
   6555 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6556 
   6557 	mutex_enter(txq->txq_lock);
   6558 
   6559 	if (txq->txq_stopping)
   6560 		goto out;
   6561 
   6562 	if (txq->txq_fifo_stall) {
   6563 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6564 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6565 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6566 			/*
   6567 			 * Packets have drained.  Stop transmitter, reset
   6568 			 * FIFO pointers, restart transmitter, and kick
   6569 			 * the packet queue.
   6570 			 */
   6571 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6572 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6573 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6574 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6575 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6576 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6577 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6578 			CSR_WRITE_FLUSH(sc);
   6579 
   6580 			txq->txq_fifo_head = 0;
   6581 			txq->txq_fifo_stall = 0;
   6582 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6583 		} else {
   6584 			/*
   6585 			 * Still waiting for packets to drain; try again in
   6586 			 * another tick.
   6587 			 */
   6588 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6589 		}
   6590 	}
   6591 
   6592 out:
   6593 	mutex_exit(txq->txq_lock);
   6594 }
   6595 
   6596 /*
   6597  * wm_82547_txfifo_bugchk:
   6598  *
   6599  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6600  *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6602  *
   6603  *	We do this by checking the amount of space before the end
   6604  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6605  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6606  *	the internal FIFO pointers to the beginning, and restart
   6607  *	transmission on the interface.
   6608  */
   6609 #define	WM_FIFO_HDR		0x10
   6610 #define	WM_82547_PAD_LEN	0x3e0
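/*
 * A worked example of the check below: a 1514-byte frame occupies
 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600 bytes of FIFO
 * space, so we declare a stall once the space remaining before the end
 * of the FIFO is no more than len - WM_82547_PAD_LEN = 0x220 bytes.
 */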
   6611 static int
   6612 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6613 {
   6614 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6615 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6616 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6617 
   6618 	/* Just return if already stalled. */
   6619 	if (txq->txq_fifo_stall)
   6620 		return 1;
   6621 
   6622 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6623 		/* Stall only occurs in half-duplex mode. */
   6624 		goto send_packet;
   6625 	}
   6626 
   6627 	if (len >= WM_82547_PAD_LEN + space) {
   6628 		txq->txq_fifo_stall = 1;
   6629 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6630 		return 1;
   6631 	}
   6632 
   6633  send_packet:
   6634 	txq->txq_fifo_head += len;
   6635 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6636 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6637 
   6638 	return 0;
   6639 }
   6640 
   6641 static int
   6642 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6643 {
   6644 	int error;
   6645 
   6646 	/*
   6647 	 * Allocate the control data structures, and create and load the
   6648 	 * DMA map for it.
   6649 	 *
   6650 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6651 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6652 	 * both sets within the same 4G segment.
   6653 	 */
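	/*
	 * (The 4G constraint is enforced by bus_dmamem_alloc() below:
	 * the (bus_size_t)0x100000000ULL argument is the DMA boundary,
	 * so the allocated segment never crosses a 4GB address boundary.)
	 */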
   6654 	if (sc->sc_type < WM_T_82544)
   6655 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6656 	else
   6657 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6658 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6659 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6660 	else
   6661 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6662 
   6663 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6664 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6665 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6666 		aprint_error_dev(sc->sc_dev,
   6667 		    "unable to allocate TX control data, error = %d\n",
   6668 		    error);
   6669 		goto fail_0;
   6670 	}
   6671 
   6672 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6673 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6674 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6675 		aprint_error_dev(sc->sc_dev,
   6676 		    "unable to map TX control data, error = %d\n", error);
   6677 		goto fail_1;
   6678 	}
   6679 
   6680 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6681 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6682 		aprint_error_dev(sc->sc_dev,
   6683 		    "unable to create TX control data DMA map, error = %d\n",
   6684 		    error);
   6685 		goto fail_2;
   6686 	}
   6687 
   6688 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6689 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6690 		aprint_error_dev(sc->sc_dev,
   6691 		    "unable to load TX control data DMA map, error = %d\n",
   6692 		    error);
   6693 		goto fail_3;
   6694 	}
   6695 
   6696 	return 0;
   6697 
   6698  fail_3:
   6699 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6700  fail_2:
   6701 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6702 	    WM_TXDESCS_SIZE(txq));
   6703  fail_1:
   6704 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6705  fail_0:
   6706 	return error;
   6707 }
   6708 
   6709 static void
   6710 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6711 {
   6712 
   6713 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6714 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6715 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6716 	    WM_TXDESCS_SIZE(txq));
   6717 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6718 }
   6719 
   6720 static int
   6721 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6722 {
   6723 	int error;
   6724 	size_t rxq_descs_size;
   6725 
   6726 	/*
   6727 	 * Allocate the control data structures, and create and load the
   6728 	 * DMA map for it.
   6729 	 *
   6730 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6731 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6732 	 * both sets within the same 4G segment.
   6733 	 */
   6734 	rxq->rxq_ndesc = WM_NRXDESC;
   6735 	if (sc->sc_type == WM_T_82574)
   6736 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6737 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6738 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6739 	else
   6740 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6741 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6742 
   6743 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6744 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6745 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6746 		aprint_error_dev(sc->sc_dev,
   6747 		    "unable to allocate RX control data, error = %d\n",
   6748 		    error);
   6749 		goto fail_0;
   6750 	}
   6751 
   6752 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6753 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6754 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6755 		aprint_error_dev(sc->sc_dev,
   6756 		    "unable to map RX control data, error = %d\n", error);
   6757 		goto fail_1;
   6758 	}
   6759 
   6760 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6761 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6762 		aprint_error_dev(sc->sc_dev,
   6763 		    "unable to create RX control data DMA map, error = %d\n",
   6764 		    error);
   6765 		goto fail_2;
   6766 	}
   6767 
   6768 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6769 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6770 		aprint_error_dev(sc->sc_dev,
   6771 		    "unable to load RX control data DMA map, error = %d\n",
   6772 		    error);
   6773 		goto fail_3;
   6774 	}
   6775 
   6776 	return 0;
   6777 
   6778  fail_3:
   6779 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6780  fail_2:
   6781 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6782 	    rxq_descs_size);
   6783  fail_1:
   6784 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6785  fail_0:
   6786 	return error;
   6787 }
   6788 
   6789 static void
   6790 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6791 {
   6792 
   6793 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6794 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6795 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6796 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6797 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6798 }
   6799 
   6800 
   6801 static int
   6802 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6803 {
   6804 	int i, error;
   6805 
   6806 	/* Create the transmit buffer DMA maps. */
   6807 	WM_TXQUEUELEN(txq) =
   6808 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6809 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6810 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6811 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6812 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6813 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6814 			aprint_error_dev(sc->sc_dev,
   6815 			    "unable to create Tx DMA map %d, error = %d\n",
   6816 			    i, error);
   6817 			goto fail;
   6818 		}
   6819 	}
   6820 
   6821 	return 0;
   6822 
   6823  fail:
   6824 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6825 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6826 			bus_dmamap_destroy(sc->sc_dmat,
   6827 			    txq->txq_soft[i].txs_dmamap);
   6828 	}
   6829 	return error;
   6830 }
   6831 
   6832 static void
   6833 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6834 {
   6835 	int i;
   6836 
   6837 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6838 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6839 			bus_dmamap_destroy(sc->sc_dmat,
   6840 			    txq->txq_soft[i].txs_dmamap);
   6841 	}
   6842 }
   6843 
   6844 static int
   6845 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6846 {
   6847 	int i, error;
   6848 
   6849 	/* Create the receive buffer DMA maps. */
   6850 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6851 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6852 			    MCLBYTES, 0, 0,
   6853 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6854 			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d, error = %d\n",
   6856 			    i, error);
   6857 			goto fail;
   6858 		}
   6859 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6860 	}
   6861 
   6862 	return 0;
   6863 
   6864  fail:
   6865 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6866 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6867 			bus_dmamap_destroy(sc->sc_dmat,
   6868 			    rxq->rxq_soft[i].rxs_dmamap);
   6869 	}
   6870 	return error;
   6871 }
   6872 
   6873 static void
   6874 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6875 {
   6876 	int i;
   6877 
   6878 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6879 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6880 			bus_dmamap_destroy(sc->sc_dmat,
   6881 			    rxq->rxq_soft[i].rxs_dmamap);
   6882 	}
   6883 }
   6884 
   6885 /*
 * wm_alloc_txrx_queues:
 *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6888  */
   6889 static int
   6890 wm_alloc_txrx_queues(struct wm_softc *sc)
   6891 {
   6892 	int i, error, tx_done, rx_done;
   6893 
   6894 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6895 	    KM_SLEEP);
   6896 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6898 		error = ENOMEM;
   6899 		goto fail_0;
   6900 	}
   6901 
   6902 	/* For transmission */
   6903 	error = 0;
   6904 	tx_done = 0;
   6905 	for (i = 0; i < sc->sc_nqueues; i++) {
   6906 #ifdef WM_EVENT_COUNTERS
   6907 		int j;
   6908 		const char *xname;
   6909 #endif
   6910 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6911 		txq->txq_sc = sc;
   6912 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6913 
   6914 		error = wm_alloc_tx_descs(sc, txq);
   6915 		if (error)
   6916 			break;
   6917 		error = wm_alloc_tx_buffer(sc, txq);
   6918 		if (error) {
   6919 			wm_free_tx_descs(sc, txq);
   6920 			break;
   6921 		}
   6922 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6923 		if (txq->txq_interq == NULL) {
   6924 			wm_free_tx_descs(sc, txq);
   6925 			wm_free_tx_buffer(sc, txq);
   6926 			error = ENOMEM;
   6927 			break;
   6928 		}
   6929 
   6930 #ifdef WM_EVENT_COUNTERS
   6931 		xname = device_xname(sc->sc_dev);
   6932 
   6933 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6934 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6935 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6936 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6937 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6938 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6939 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6940 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6941 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6942 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6943 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6944 
   6945 		for (j = 0; j < WM_NTXSEGS; j++) {
			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
   6950 		}
   6951 
   6952 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6953 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6954 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6955 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6956 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6957 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   6958 #endif /* WM_EVENT_COUNTERS */
   6959 
   6960 		tx_done++;
   6961 	}
   6962 	if (error)
   6963 		goto fail_1;
   6964 
   6965 	/* For receive */
   6966 	error = 0;
   6967 	rx_done = 0;
   6968 	for (i = 0; i < sc->sc_nqueues; i++) {
   6969 #ifdef WM_EVENT_COUNTERS
   6970 		const char *xname;
   6971 #endif
   6972 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6973 		rxq->rxq_sc = sc;
   6974 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6975 
   6976 		error = wm_alloc_rx_descs(sc, rxq);
   6977 		if (error)
   6978 			break;
   6979 
   6980 		error = wm_alloc_rx_buffer(sc, rxq);
   6981 		if (error) {
   6982 			wm_free_rx_descs(sc, rxq);
   6983 			break;
   6984 		}
   6985 
   6986 #ifdef WM_EVENT_COUNTERS
   6987 		xname = device_xname(sc->sc_dev);
   6988 
   6989 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6990 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6991 
   6992 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6993 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6994 #endif /* WM_EVENT_COUNTERS */
   6995 
   6996 		rx_done++;
   6997 	}
   6998 	if (error)
   6999 		goto fail_2;
   7000 
   7001 	for (i = 0; i < sc->sc_nqueues; i++) {
   7002 		char rndname[16];
   7003 
   7004 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   7005 		    device_xname(sc->sc_dev), i);
   7006 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   7007 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   7008 	}
   7009 
   7010 	return 0;
   7011 
   7012  fail_2:
   7013 	for (i = 0; i < rx_done; i++) {
   7014 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7015 		wm_free_rx_buffer(sc, rxq);
   7016 		wm_free_rx_descs(sc, rxq);
   7017 		if (rxq->rxq_lock)
   7018 			mutex_obj_free(rxq->rxq_lock);
   7019 	}
   7020  fail_1:
   7021 	for (i = 0; i < tx_done; i++) {
   7022 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7023 		pcq_destroy(txq->txq_interq);
   7024 		wm_free_tx_buffer(sc, txq);
   7025 		wm_free_tx_descs(sc, txq);
   7026 		if (txq->txq_lock)
   7027 			mutex_obj_free(txq->txq_lock);
   7028 	}
   7029 
	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7032  fail_0:
   7033 	return error;
   7034 }
   7035 
   7036 /*
 * wm_free_txrx_queues:
 *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7039  */
   7040 static void
   7041 wm_free_txrx_queues(struct wm_softc *sc)
   7042 {
   7043 	int i;
   7044 
   7045 	for (i = 0; i < sc->sc_nqueues; i++)
   7046 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   7047 
   7048 	for (i = 0; i < sc->sc_nqueues; i++) {
   7049 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7050 
   7051 #ifdef WM_EVENT_COUNTERS
   7052 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7053 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7054 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7055 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7056 #endif /* WM_EVENT_COUNTERS */
   7057 
   7058 		wm_free_rx_buffer(sc, rxq);
   7059 		wm_free_rx_descs(sc, rxq);
   7060 		if (rxq->rxq_lock)
   7061 			mutex_obj_free(rxq->rxq_lock);
   7062 	}
   7063 
   7064 	for (i = 0; i < sc->sc_nqueues; i++) {
   7065 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7066 		struct mbuf *m;
   7067 #ifdef WM_EVENT_COUNTERS
   7068 		int j;
   7069 
   7070 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7071 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7072 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7073 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7074 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7075 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7076 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7077 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7078 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7079 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7080 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7081 
   7082 		for (j = 0; j < WM_NTXSEGS; j++)
   7083 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7084 
   7085 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7086 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7087 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7088 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7089 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7090 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7091 #endif /* WM_EVENT_COUNTERS */
   7092 
   7093 		/* Drain txq_interq */
   7094 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7095 			m_freem(m);
   7096 		pcq_destroy(txq->txq_interq);
   7097 
   7098 		wm_free_tx_buffer(sc, txq);
   7099 		wm_free_tx_descs(sc, txq);
   7100 		if (txq->txq_lock)
   7101 			mutex_obj_free(txq->txq_lock);
   7102 	}
   7103 
   7104 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7105 }
   7106 
   7107 static void
   7108 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7109 {
   7110 
   7111 	KASSERT(mutex_owned(txq->txq_lock));
   7112 
   7113 	/* Initialize the transmit descriptor ring. */
   7114 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7115 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7116 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7117 	txq->txq_free = WM_NTXDESC(txq);
   7118 	txq->txq_next = 0;
   7119 }
   7120 
   7121 static void
   7122 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7123     struct wm_txqueue *txq)
   7124 {
   7125 
   7126 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7127 		device_xname(sc->sc_dev), __func__));
   7128 	KASSERT(mutex_owned(txq->txq_lock));
   7129 
   7130 	if (sc->sc_type < WM_T_82543) {
   7131 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7132 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7133 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7134 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7135 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7136 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7137 	} else {
   7138 		int qid = wmq->wmq_id;
   7139 
   7140 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7141 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7142 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7143 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7144 
   7145 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7146 			/*
   7147 			 * Don't write TDT before TCTL.EN is set.
   7148 			 * See the document.
   7149 			 */
   7150 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7151 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7152 			    | TXDCTL_WTHRESH(0));
   7153 		else {
   7154 			/* XXX should update with AIM? */
   7155 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7156 			if (sc->sc_type >= WM_T_82540) {
   7157 				/* Should be the same */
   7158 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7159 			}
   7160 
   7161 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7162 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7163 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7164 		}
   7165 	}
   7166 }
   7167 
   7168 static void
   7169 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7170 {
   7171 	int i;
   7172 
   7173 	KASSERT(mutex_owned(txq->txq_lock));
   7174 
   7175 	/* Initialize the transmit job descriptors. */
   7176 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7177 		txq->txq_soft[i].txs_mbuf = NULL;
   7178 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7179 	txq->txq_snext = 0;
   7180 	txq->txq_sdirty = 0;
   7181 }
   7182 
   7183 static void
   7184 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7185     struct wm_txqueue *txq)
   7186 {
   7187 
   7188 	KASSERT(mutex_owned(txq->txq_lock));
   7189 
   7190 	/*
   7191 	 * Set up some register offsets that are different between
   7192 	 * the i82542 and the i82543 and later chips.
   7193 	 */
   7194 	if (sc->sc_type < WM_T_82543)
   7195 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7196 	else
   7197 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7198 
   7199 	wm_init_tx_descs(sc, txq);
   7200 	wm_init_tx_regs(sc, wmq, txq);
   7201 	wm_init_tx_buffer(sc, txq);
   7202 
   7203 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7204 	txq->txq_sending = false;
   7205 }
   7206 
   7207 static void
   7208 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7209     struct wm_rxqueue *rxq)
   7210 {
   7211 
   7212 	KASSERT(mutex_owned(rxq->rxq_lock));
   7213 
   7214 	/*
   7215 	 * Initialize the receive descriptor and receive job
   7216 	 * descriptor rings.
   7217 	 */
   7218 	if (sc->sc_type < WM_T_82543) {
   7219 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7220 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7221 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7222 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7223 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7224 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7225 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7226 
   7227 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7228 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7229 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7230 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7231 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7232 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7233 	} else {
   7234 		int qid = wmq->wmq_id;
   7235 
   7236 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7237 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7238 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7239 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7240 
   7241 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or higher\n", __func__, MCLBYTES);

			/*
			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
			 * supported.
			 */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
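			/*
			 * For example, with MCLBYTES = 2048 and
			 * SRRCTL_BSIZEPKT_SHIFT = 10 (the BSIZEPKT field
			 * counts 1KB units), the field is set to 2, i.e.
			 * 2KB receive buffers.
			 */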
   7248 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7249 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7250 			    | RXDCTL_WTHRESH(1));
   7251 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7252 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7253 		} else {
   7254 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7255 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7256 			/* XXX should update with AIM? */
   7257 			CSR_WRITE(sc, WMREG_RDTR,
   7258 			    (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same. */
   7260 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7261 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7262 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7263 		}
   7264 	}
   7265 }
   7266 
   7267 static int
   7268 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7269 {
   7270 	struct wm_rxsoft *rxs;
   7271 	int error, i;
   7272 
   7273 	KASSERT(mutex_owned(rxq->rxq_lock));
   7274 
   7275 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7276 		rxs = &rxq->rxq_soft[i];
   7277 		if (rxs->rxs_mbuf == NULL) {
   7278 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7279 				log(LOG_ERR, "%s: unable to allocate or map "
   7280 				    "rx buffer %d, error = %d\n",
   7281 				    device_xname(sc->sc_dev), i, error);
   7282 				/*
   7283 				 * XXX Should attempt to run with fewer receive
   7284 				 * XXX buffers instead of just failing.
   7285 				 */
   7286 				wm_rxdrain(rxq);
   7287 				return ENOMEM;
   7288 			}
   7289 		} else {
   7290 			/*
   7291 			 * For 82575 and 82576, the RX descriptors must be
   7292 			 * initialized after the setting of RCTL.EN in
   7293 			 * wm_set_filter()
   7294 			 */
   7295 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7296 				wm_init_rxdesc(rxq, i);
   7297 		}
   7298 	}
   7299 	rxq->rxq_ptr = 0;
   7300 	rxq->rxq_discard = 0;
   7301 	WM_RXCHAIN_RESET(rxq);
   7302 
   7303 	return 0;
   7304 }
   7305 
   7306 static int
   7307 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7308     struct wm_rxqueue *rxq)
   7309 {
   7310 
   7311 	KASSERT(mutex_owned(rxq->rxq_lock));
   7312 
   7313 	/*
   7314 	 * Set up some register offsets that are different between
   7315 	 * the i82542 and the i82543 and later chips.
   7316 	 */
   7317 	if (sc->sc_type < WM_T_82543)
   7318 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7319 	else
   7320 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7321 
   7322 	wm_init_rx_regs(sc, wmq, rxq);
   7323 	return wm_init_rx_buffer(sc, rxq);
   7324 }
   7325 
   7326 /*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7329  */
   7330 static int
   7331 wm_init_txrx_queues(struct wm_softc *sc)
   7332 {
   7333 	int i, error = 0;
   7334 
   7335 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7336 		device_xname(sc->sc_dev), __func__));
   7337 
   7338 	for (i = 0; i < sc->sc_nqueues; i++) {
   7339 		struct wm_queue *wmq = &sc->sc_queue[i];
   7340 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7341 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7342 
		/*
		 * TODO
		 * Currently, use a constant value instead of AIM
		 * (Adaptive Interrupt Moderation). Furthermore, the
		 * interrupt interval for multiqueue, which uses polling
		 * mode, is set lower than the default value.
		 * More tuning and AIM support are required.
		 */
   7350 		if (wm_is_using_multiqueue(sc))
   7351 			wmq->wmq_itr = 50;
   7352 		else
   7353 			wmq->wmq_itr = sc->sc_itr_init;
   7354 		wmq->wmq_set_itr = true;
   7355 
   7356 		mutex_enter(txq->txq_lock);
   7357 		wm_init_tx_queue(sc, wmq, txq);
   7358 		mutex_exit(txq->txq_lock);
   7359 
   7360 		mutex_enter(rxq->rxq_lock);
   7361 		error = wm_init_rx_queue(sc, wmq, rxq);
   7362 		mutex_exit(rxq->rxq_lock);
   7363 		if (error)
   7364 			break;
   7365 	}
   7366 
   7367 	return error;
   7368 }
   7369 
   7370 /*
   7371  * wm_tx_offload:
   7372  *
   7373  *	Set up TCP/IP checksumming parameters for the
   7374  *	specified packet.
   7375  */
   7376 static void
   7377 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7378     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7379 {
   7380 	struct mbuf *m0 = txs->txs_mbuf;
   7381 	struct livengood_tcpip_ctxdesc *t;
   7382 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7383 	uint32_t ipcse;
   7384 	struct ether_header *eh;
   7385 	int offset, iphl;
   7386 	uint8_t fields;
   7387 
   7388 	/*
   7389 	 * XXX It would be nice if the mbuf pkthdr had offset
   7390 	 * fields for the protocol headers.
   7391 	 */
   7392 
   7393 	eh = mtod(m0, struct ether_header *);
   7394 	switch (htons(eh->ether_type)) {
   7395 	case ETHERTYPE_IP:
   7396 	case ETHERTYPE_IPV6:
   7397 		offset = ETHER_HDR_LEN;
   7398 		break;
   7399 
   7400 	case ETHERTYPE_VLAN:
   7401 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7402 		break;
   7403 
   7404 	default:
   7405 		/* Don't support this protocol or encapsulation. */
		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
		txq->txq_last_hw_ipcs = 0;
		txq->txq_last_hw_tucs = 0;
   7409 		*fieldsp = 0;
   7410 		*cmdp = 0;
   7411 		return;
   7412 	}
   7413 
   7414 	if ((m0->m_pkthdr.csum_flags &
   7415 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7416 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7417 	} else
   7418 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7419 
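	/* IPCSE is the offset of the last byte of the IP header, inclusive. */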
   7420 	ipcse = offset + iphl - 1;
   7421 
   7422 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7423 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7424 	seg = 0;
   7425 	fields = 0;
   7426 
   7427 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7428 		int hlen = offset + iphl;
   7429 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7430 
   7431 		if (__predict_false(m0->m_len <
   7432 				    (hlen + sizeof(struct tcphdr)))) {
   7433 			/*
   7434 			 * TCP/IP headers are not in the first mbuf; we need
   7435 			 * to do this the slow and painful way. Let's just
   7436 			 * hope this doesn't happen very often.
   7437 			 */
   7438 			struct tcphdr th;
   7439 
   7440 			WM_Q_EVCNT_INCR(txq, tsopain);
   7441 
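			/*
			 * Seed the TCP checksum field with the pseudo-header
			 * checksum computed over a zero payload length; the
			 * hardware fills in the final checksum for each
			 * segment it emits.
			 */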
   7442 			m_copydata(m0, hlen, sizeof(th), &th);
   7443 			if (v4) {
   7444 				struct ip ip;
   7445 
   7446 				m_copydata(m0, offset, sizeof(ip), &ip);
   7447 				ip.ip_len = 0;
   7448 				m_copyback(m0,
   7449 				    offset + offsetof(struct ip, ip_len),
   7450 				    sizeof(ip.ip_len), &ip.ip_len);
   7451 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7452 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7453 			} else {
   7454 				struct ip6_hdr ip6;
   7455 
   7456 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7457 				ip6.ip6_plen = 0;
   7458 				m_copyback(m0,
   7459 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7460 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7461 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7462 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7463 			}
   7464 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7465 			    sizeof(th.th_sum), &th.th_sum);
   7466 
   7467 			hlen += th.th_off << 2;
   7468 		} else {
   7469 			/*
   7470 			 * TCP/IP headers are in the first mbuf; we can do
   7471 			 * this the easy way.
   7472 			 */
   7473 			struct tcphdr *th;
   7474 
   7475 			if (v4) {
   7476 				struct ip *ip =
   7477 				    (void *)(mtod(m0, char *) + offset);
   7478 				th = (void *)(mtod(m0, char *) + hlen);
   7479 
   7480 				ip->ip_len = 0;
   7481 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7482 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7483 			} else {
   7484 				struct ip6_hdr *ip6 =
   7485 				    (void *)(mtod(m0, char *) + offset);
   7486 				th = (void *)(mtod(m0, char *) + hlen);
   7487 
   7488 				ip6->ip6_plen = 0;
   7489 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7490 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7491 			}
   7492 			hlen += th->th_off << 2;
   7493 		}
   7494 
   7495 		if (v4) {
   7496 			WM_Q_EVCNT_INCR(txq, tso);
   7497 			cmdlen |= WTX_TCPIP_CMD_IP;
   7498 		} else {
   7499 			WM_Q_EVCNT_INCR(txq, tso6);
   7500 			ipcse = 0;
   7501 		}
   7502 		cmd |= WTX_TCPIP_CMD_TSE;
   7503 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7504 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7505 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7506 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7507 	}
   7508 
   7509 	/*
   7510 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7511 	 * offload feature, if we load the context descriptor, we
   7512 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7513 	 */
   7514 
   7515 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7516 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7517 	    WTX_TCPIP_IPCSE(ipcse);
   7518 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7519 		WM_Q_EVCNT_INCR(txq, ipsum);
   7520 		fields |= WTX_IXSM;
   7521 	}
   7522 
   7523 	offset += iphl;
   7524 
   7525 	if (m0->m_pkthdr.csum_flags &
   7526 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7527 		WM_Q_EVCNT_INCR(txq, tusum);
   7528 		fields |= WTX_TXSM;
   7529 		tucs = WTX_TCPIP_TUCSS(offset) |
   7530 		    WTX_TCPIP_TUCSO(offset +
   7531 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7532 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7533 	} else if ((m0->m_pkthdr.csum_flags &
   7534 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7535 		WM_Q_EVCNT_INCR(txq, tusum6);
   7536 		fields |= WTX_TXSM;
   7537 		tucs = WTX_TCPIP_TUCSS(offset) |
   7538 		    WTX_TCPIP_TUCSO(offset +
   7539 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7540 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7541 	} else {
   7542 		/* Just initialize it to a valid TCP context. */
   7543 		tucs = WTX_TCPIP_TUCSS(offset) |
   7544 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7545 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7546 	}
   7547 
   7548 	*cmdp = cmd;
   7549 	*fieldsp = fields;
   7550 
	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574: there we must write a context descriptor
	 * for every packet when we use two descriptor queues.
	 *
	 * The 82574L can only remember the *last* context used,
	 * regardless of which queue it was used for.  We cannot reuse
	 * contexts on this hardware platform and must generate a new
	 * context every time.  See the 82574L hardware spec, section
	 * 7.2.6, second note.
	 */
   7562 	if (sc->sc_nqueues < 2) {
		/*
		 * Setting up a new checksum offload context for every
		 * frame takes a lot of processing time for the hardware.
		 * This also reduces performance a lot for small frames,
		 * so avoid it if the driver can use a previously
		 * configured checksum offload context.
		 * For TSO, in theory we could reuse the same TSO context
		 * if the frame is the same type (IP/TCP) and has the same
		 * MSS. However, checking whether a frame has the same
		 * IP/TCP structure is hard, so just ignore that and
		 * always establish a new TSO context.
		 */
   7576 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7577 		    == 0) {
   7578 			if (txq->txq_last_hw_cmd == cmd &&
   7579 			    txq->txq_last_hw_fields == fields &&
   7580 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7581 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7582 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7583 				return;
   7584 			}
   7585 		}
   7586 
		txq->txq_last_hw_cmd = cmd;
		txq->txq_last_hw_fields = fields;
		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7590 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7591 	}
   7592 
   7593 	/* Fill in the context descriptor. */
   7594 	t = (struct livengood_tcpip_ctxdesc *)
   7595 	    &txq->txq_descs[txq->txq_next];
   7596 	t->tcpip_ipcs = htole32(ipcs);
   7597 	t->tcpip_tucs = htole32(tucs);
   7598 	t->tcpip_cmdlen = htole32(cmdlen);
   7599 	t->tcpip_seg = htole32(seg);
   7600 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7601 
   7602 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7603 	txs->txs_ndesc++;
   7604 }
   7605 
   7606 static inline int
   7607 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7608 {
   7609 	struct wm_softc *sc = ifp->if_softc;
   7610 	u_int cpuid = cpu_index(curcpu());
   7611 
	/*
	 * Currently, a simple distribution strategy.
	 * TODO:
	 * Distribute by flowid (RSS hash value).
	 */
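	/*
	 * Worked example: with ncpu = 8, sc_affinity_offset = 2 and
	 * sc_nqueues = 4, a packet sent from CPU 3 maps to queue
	 * ((3 + 8 - 2) % 8) % 4 = 1.  Adding ncpu keeps the first
	 * modulo operand non-negative.
	 */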
	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu)
	    % sc->sc_nqueues;
   7618 }
   7619 
   7620 /*
   7621  * wm_start:		[ifnet interface function]
   7622  *
   7623  *	Start packet transmission on the interface.
   7624  */
   7625 static void
   7626 wm_start(struct ifnet *ifp)
   7627 {
   7628 	struct wm_softc *sc = ifp->if_softc;
   7629 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7630 
   7631 #ifdef WM_MPSAFE
   7632 	KASSERT(if_is_mpsafe(ifp));
   7633 #endif
   7634 	/*
   7635 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7636 	 */
   7637 
   7638 	mutex_enter(txq->txq_lock);
   7639 	if (!txq->txq_stopping)
   7640 		wm_start_locked(ifp);
   7641 	mutex_exit(txq->txq_lock);
   7642 }
   7643 
   7644 static void
   7645 wm_start_locked(struct ifnet *ifp)
   7646 {
   7647 	struct wm_softc *sc = ifp->if_softc;
   7648 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7649 
   7650 	wm_send_common_locked(ifp, txq, false);
   7651 }
   7652 
   7653 static int
   7654 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7655 {
   7656 	int qid;
   7657 	struct wm_softc *sc = ifp->if_softc;
   7658 	struct wm_txqueue *txq;
   7659 
   7660 	qid = wm_select_txqueue(ifp, m);
   7661 	txq = &sc->sc_queue[qid].wmq_txq;
   7662 
   7663 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7664 		m_freem(m);
   7665 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7666 		return ENOBUFS;
   7667 	}
   7668 
   7669 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7670 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7671 	if (m->m_flags & M_MCAST)
   7672 		if_statinc_ref(nsr, if_omcasts);
   7673 	IF_STAT_PUTREF(ifp);
   7674 
   7675 	if (mutex_tryenter(txq->txq_lock)) {
   7676 		if (!txq->txq_stopping)
   7677 			wm_transmit_locked(ifp, txq);
   7678 		mutex_exit(txq->txq_lock);
   7679 	}
   7680 
   7681 	return 0;
   7682 }
   7683 
   7684 static void
   7685 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7686 {
   7687 
   7688 	wm_send_common_locked(ifp, txq, true);
   7689 }
   7690 
   7691 static void
   7692 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7693     bool is_transmit)
   7694 {
   7695 	struct wm_softc *sc = ifp->if_softc;
   7696 	struct mbuf *m0;
   7697 	struct wm_txsoft *txs;
   7698 	bus_dmamap_t dmamap;
   7699 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7700 	bus_addr_t curaddr;
   7701 	bus_size_t seglen, curlen;
   7702 	uint32_t cksumcmd;
   7703 	uint8_t cksumfields;
   7704 	bool remap = true;
   7705 
   7706 	KASSERT(mutex_owned(txq->txq_lock));
   7707 
   7708 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7709 		return;
   7710 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7711 		return;
   7712 
   7713 	/* Remember the previous number of free descriptors. */
   7714 	ofree = txq->txq_free;
   7715 
   7716 	/*
   7717 	 * Loop through the send queue, setting up transmit descriptors
   7718 	 * until we drain the queue, or use up all available transmit
   7719 	 * descriptors.
   7720 	 */
   7721 	for (;;) {
   7722 		m0 = NULL;
   7723 
   7724 		/* Get a work queue entry. */
   7725 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7726 			wm_txeof(txq, UINT_MAX);
   7727 			if (txq->txq_sfree == 0) {
   7728 				DPRINTF(WM_DEBUG_TX,
   7729 				    ("%s: TX: no free job descriptors\n",
   7730 					device_xname(sc->sc_dev)));
   7731 				WM_Q_EVCNT_INCR(txq, txsstall);
   7732 				break;
   7733 			}
   7734 		}
   7735 
   7736 		/* Grab a packet off the queue. */
   7737 		if (is_transmit)
   7738 			m0 = pcq_get(txq->txq_interq);
   7739 		else
   7740 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7741 		if (m0 == NULL)
   7742 			break;
   7743 
   7744 		DPRINTF(WM_DEBUG_TX,
   7745 		    ("%s: TX: have packet to transmit: %p\n",
   7746 			device_xname(sc->sc_dev), m0));
   7747 
   7748 		txs = &txq->txq_soft[txq->txq_snext];
   7749 		dmamap = txs->txs_dmamap;
   7750 
   7751 		use_tso = (m0->m_pkthdr.csum_flags &
   7752 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7753 
   7754 		/*
   7755 		 * So says the Linux driver:
   7756 		 * The controller does a simple calculation to make sure
   7757 		 * there is enough room in the FIFO before initiating the
   7758 		 * DMA for each buffer. The calc is:
   7759 		 *	4 = ceil(buffer len / MSS)
   7760 		 * To make sure we don't overrun the FIFO, adjust the max
   7761 		 * buffer len if the MSS drops.
   7762 		 */
   7763 		dmamap->dm_maxsegsz =
   7764 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7765 		    ? m0->m_pkthdr.segsz << 2
   7766 		    : WTX_MAX_LEN;
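		/*
		 * E.g. with an MSS of 1460, each DMA segment is capped at
		 * 1460 << 2 = 5840 bytes, i.e. four MSS-sized chunks
		 * (assuming that is below WTX_MAX_LEN), matching the
		 * controller's FIFO headroom calculation described above.
		 */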
   7767 
   7768 		/*
   7769 		 * Load the DMA map.  If this fails, the packet either
   7770 		 * didn't fit in the allotted number of segments, or we
   7771 		 * were short on resources.  For the too-many-segments
   7772 		 * case, we simply report an error and drop the packet,
   7773 		 * since we can't sanely copy a jumbo packet to a single
   7774 		 * buffer.
   7775 		 */
   7776 retry:
   7777 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7778 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7779 		if (__predict_false(error)) {
   7780 			if (error == EFBIG) {
   7781 				if (remap == true) {
   7782 					struct mbuf *m;
   7783 
   7784 					remap = false;
   7785 					m = m_defrag(m0, M_NOWAIT);
   7786 					if (m != NULL) {
   7787 						WM_Q_EVCNT_INCR(txq, defrag);
   7788 						m0 = m;
   7789 						goto retry;
   7790 					}
   7791 				}
   7792 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7793 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7794 				    "DMA segments, dropping...\n",
   7795 				    device_xname(sc->sc_dev));
   7796 				wm_dump_mbuf_chain(sc, m0);
   7797 				m_freem(m0);
   7798 				continue;
   7799 			}
   7800 			/* Short on resources, just stop for now. */
   7801 			DPRINTF(WM_DEBUG_TX,
   7802 			    ("%s: TX: dmamap load failed: %d\n",
   7803 				device_xname(sc->sc_dev), error));
   7804 			break;
   7805 		}
   7806 
   7807 		segs_needed = dmamap->dm_nsegs;
   7808 		if (use_tso) {
   7809 			/* For sentinel descriptor; see below. */
   7810 			segs_needed++;
   7811 		}
   7812 
   7813 		/*
   7814 		 * Ensure we have enough descriptors free to describe
   7815 		 * the packet. Note, we always reserve one descriptor
   7816 		 * at the end of the ring due to the semantics of the
   7817 		 * TDT register, plus one more in the event we need
   7818 		 * to load offload context.
   7819 		 */
   7820 		if (segs_needed > txq->txq_free - 2) {
   7821 			/*
   7822 			 * Not enough free descriptors to transmit this
   7823 			 * packet.  We haven't committed anything yet,
    7824 			 * so just unload the DMA map, drop the packet
    7825 			 * (it is freed after the loop), and notify the
    7826 			 * upper layer that there are no more slots left.
   7827 			 */
   7828 			DPRINTF(WM_DEBUG_TX,
   7829 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7830 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7831 				segs_needed, txq->txq_free - 1));
   7832 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7833 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7834 			WM_Q_EVCNT_INCR(txq, txdstall);
   7835 			break;
   7836 		}
   7837 
   7838 		/*
   7839 		 * Check for 82547 Tx FIFO bug. We need to do this
   7840 		 * once we know we can transmit the packet, since we
   7841 		 * do some internal FIFO space accounting here.
   7842 		 */
   7843 		if (sc->sc_type == WM_T_82547 &&
   7844 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7845 			DPRINTF(WM_DEBUG_TX,
   7846 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7847 				device_xname(sc->sc_dev)));
   7848 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7849 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7850 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7851 			break;
   7852 		}
   7853 
   7854 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7855 
   7856 		DPRINTF(WM_DEBUG_TX,
   7857 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7858 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7859 
   7860 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7861 
   7862 		/*
   7863 		 * Store a pointer to the packet so that we can free it
   7864 		 * later.
   7865 		 *
   7866 		 * Initially, we consider the number of descriptors the
    7867 		 * packet uses to be the number of DMA segments.  This may be
   7868 		 * incremented by 1 if we do checksum offload (a descriptor
   7869 		 * is used to set the checksum context).
   7870 		 */
   7871 		txs->txs_mbuf = m0;
   7872 		txs->txs_firstdesc = txq->txq_next;
   7873 		txs->txs_ndesc = segs_needed;
   7874 
   7875 		/* Set up offload parameters for this packet. */
   7876 		if (m0->m_pkthdr.csum_flags &
   7877 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7878 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7879 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7880 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   7881 		} else {
    7882 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
    7883 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   7884 			cksumcmd = 0;
   7885 			cksumfields = 0;
   7886 		}
   7887 
   7888 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7889 
   7890 		/* Sync the DMA map. */
   7891 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7892 		    BUS_DMASYNC_PREWRITE);
   7893 
   7894 		/* Initialize the transmit descriptor. */
   7895 		for (nexttx = txq->txq_next, seg = 0;
   7896 		     seg < dmamap->dm_nsegs; seg++) {
   7897 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7898 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7899 			     seglen != 0;
   7900 			     curaddr += curlen, seglen -= curlen,
   7901 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7902 				curlen = seglen;
   7903 
   7904 				/*
   7905 				 * So says the Linux driver:
   7906 				 * Work around for premature descriptor
   7907 				 * write-backs in TSO mode.  Append a
   7908 				 * 4-byte sentinel descriptor.
   7909 				 */
   7910 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7911 				    curlen > 8)
   7912 					curlen -= 4;
   7913 
   7914 				wm_set_dma_addr(
   7915 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7916 				txq->txq_descs[nexttx].wtx_cmdlen
   7917 				    = htole32(cksumcmd | curlen);
   7918 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7919 				    = 0;
   7920 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7921 				    = cksumfields;
    7922 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7923 				lasttx = nexttx;
   7924 
   7925 				DPRINTF(WM_DEBUG_TX,
   7926 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7927 					"len %#04zx\n",
   7928 					device_xname(sc->sc_dev), nexttx,
   7929 					(uint64_t)curaddr, curlen));
   7930 			}
   7931 		}
   7932 
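         		/*
         		 * Illustrative example: for a TSO packet whose last DMA
         		 * segment is 100 bytes, the loop above first emits a
         		 * 96-byte descriptor (curlen -= 4) and then loops once
         		 * more to emit the remaining 4 bytes as the sentinel
         		 * descriptor, matching the extra slot counted in
         		 * segs_needed.
         		 */
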
   7933 		KASSERT(lasttx != -1);
   7934 
   7935 		/*
   7936 		 * Set up the command byte on the last descriptor of
   7937 		 * the packet. If we're in the interrupt delay window,
   7938 		 * delay the interrupt.
   7939 		 */
   7940 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7941 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7942 
   7943 		/*
   7944 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7945 		 * up the descriptor to encapsulate the packet for us.
   7946 		 *
   7947 		 * This is only valid on the last descriptor of the packet.
   7948 		 */
   7949 		if (vlan_has_tag(m0)) {
   7950 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7951 			    htole32(WTX_CMD_VLE);
   7952 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7953 			    = htole16(vlan_get_tag(m0));
   7954 		}
   7955 
   7956 		txs->txs_lastdesc = lasttx;
   7957 
   7958 		DPRINTF(WM_DEBUG_TX,
   7959 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7960 			device_xname(sc->sc_dev),
   7961 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7962 
   7963 		/* Sync the descriptors we're using. */
   7964 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7965 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7966 
   7967 		/* Give the packet to the chip. */
   7968 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7969 
   7970 		DPRINTF(WM_DEBUG_TX,
   7971 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7972 
   7973 		DPRINTF(WM_DEBUG_TX,
   7974 		    ("%s: TX: finished transmitting packet, job %d\n",
   7975 			device_xname(sc->sc_dev), txq->txq_snext));
   7976 
   7977 		/* Advance the tx pointer. */
   7978 		txq->txq_free -= txs->txs_ndesc;
   7979 		txq->txq_next = nexttx;
   7980 
   7981 		txq->txq_sfree--;
   7982 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7983 
   7984 		/* Pass the packet to any BPF listeners. */
   7985 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7986 	}
   7987 
   7988 	if (m0 != NULL) {
   7989 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7990 		WM_Q_EVCNT_INCR(txq, descdrop);
   7991 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7992 			__func__));
   7993 		m_freem(m0);
   7994 	}
   7995 
   7996 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7997 		/* No more slots; notify upper layer. */
   7998 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7999 	}
   8000 
   8001 	if (txq->txq_free != ofree) {
   8002 		/* Set a watchdog timer in case the chip flakes out. */
   8003 		txq->txq_lastsent = time_uptime;
   8004 		txq->txq_sending = true;
   8005 	}
   8006 }
   8007 
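         /*
          * A sketch of the ring-index arithmetic used above (illustration
          * only): WM_NEXTTX(txq, x) advances x modulo the descriptor ring
          * size, so in a 256-entry ring WM_NEXTTX(txq, 255) == 0.  txq_next
          * is the producer index, txq_free counts unused descriptors, and
          * the "- 2" headroom keeps one slot free for the TDT semantics and
          * one for a possible offload context descriptor.
          */
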
   8008 /*
   8009  * wm_nq_tx_offload:
   8010  *
   8011  *	Set up TCP/IP checksumming parameters for the
   8012  *	specified packet, for NEWQUEUE devices
   8013  */
   8014 static void
   8015 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8016     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8017 {
   8018 	struct mbuf *m0 = txs->txs_mbuf;
   8019 	uint32_t vl_len, mssidx, cmdc;
   8020 	struct ether_header *eh;
   8021 	int offset, iphl;
   8022 
   8023 	/*
   8024 	 * XXX It would be nice if the mbuf pkthdr had offset
   8025 	 * fields for the protocol headers.
   8026 	 */
   8027 	*cmdlenp = 0;
   8028 	*fieldsp = 0;
   8029 
   8030 	eh = mtod(m0, struct ether_header *);
   8031 	switch (htons(eh->ether_type)) {
   8032 	case ETHERTYPE_IP:
   8033 	case ETHERTYPE_IPV6:
   8034 		offset = ETHER_HDR_LEN;
   8035 		break;
   8036 
   8037 	case ETHERTYPE_VLAN:
   8038 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8039 		break;
   8040 
   8041 	default:
   8042 		/* Don't support this protocol or encapsulation. */
   8043 		*do_csum = false;
   8044 		return;
   8045 	}
   8046 	*do_csum = true;
   8047 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8048 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8049 
   8050 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8051 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8052 
   8053 	if ((m0->m_pkthdr.csum_flags &
   8054 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8055 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8056 	} else {
   8057 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8058 	}
   8059 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8060 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8061 
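         	/*
         	 * Worked example (illustrative, assuming a plain TCPv4 packet
         	 * with no VLAN tag): MACLEN = ETHER_HDR_LEN = 14 and
         	 * IPLEN = 20, so vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
         	 * (20 << NQTXC_VLLEN_IPLEN_SHIFT), with the VLAN field left
         	 * zero.
         	 */
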
   8062 	if (vlan_has_tag(m0)) {
   8063 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8064 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8065 		*cmdlenp |= NQTX_CMD_VLE;
   8066 	}
   8067 
   8068 	mssidx = 0;
   8069 
   8070 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8071 		int hlen = offset + iphl;
   8072 		int tcp_hlen;
   8073 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8074 
   8075 		if (__predict_false(m0->m_len <
   8076 				    (hlen + sizeof(struct tcphdr)))) {
   8077 			/*
   8078 			 * TCP/IP headers are not in the first mbuf; we need
   8079 			 * to do this the slow and painful way. Let's just
   8080 			 * hope this doesn't happen very often.
   8081 			 */
   8082 			struct tcphdr th;
   8083 
   8084 			WM_Q_EVCNT_INCR(txq, tsopain);
   8085 
   8086 			m_copydata(m0, hlen, sizeof(th), &th);
   8087 			if (v4) {
   8088 				struct ip ip;
   8089 
   8090 				m_copydata(m0, offset, sizeof(ip), &ip);
   8091 				ip.ip_len = 0;
   8092 				m_copyback(m0,
   8093 				    offset + offsetof(struct ip, ip_len),
   8094 				    sizeof(ip.ip_len), &ip.ip_len);
   8095 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8096 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8097 			} else {
   8098 				struct ip6_hdr ip6;
   8099 
   8100 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8101 				ip6.ip6_plen = 0;
   8102 				m_copyback(m0,
   8103 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8104 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8105 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8106 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8107 			}
   8108 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8109 			    sizeof(th.th_sum), &th.th_sum);
   8110 
   8111 			tcp_hlen = th.th_off << 2;
   8112 		} else {
   8113 			/*
   8114 			 * TCP/IP headers are in the first mbuf; we can do
   8115 			 * this the easy way.
   8116 			 */
   8117 			struct tcphdr *th;
   8118 
   8119 			if (v4) {
   8120 				struct ip *ip =
   8121 				    (void *)(mtod(m0, char *) + offset);
   8122 				th = (void *)(mtod(m0, char *) + hlen);
   8123 
   8124 				ip->ip_len = 0;
   8125 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8126 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8127 			} else {
   8128 				struct ip6_hdr *ip6 =
   8129 				    (void *)(mtod(m0, char *) + offset);
   8130 				th = (void *)(mtod(m0, char *) + hlen);
   8131 
   8132 				ip6->ip6_plen = 0;
   8133 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8134 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8135 			}
   8136 			tcp_hlen = th->th_off << 2;
   8137 		}
   8138 		hlen += tcp_hlen;
   8139 		*cmdlenp |= NQTX_CMD_TSE;
   8140 
   8141 		if (v4) {
   8142 			WM_Q_EVCNT_INCR(txq, tso);
   8143 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8144 		} else {
   8145 			WM_Q_EVCNT_INCR(txq, tso6);
   8146 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8147 		}
   8148 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8149 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8150 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8151 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8152 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8153 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8154 	} else {
   8155 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8156 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8157 	}
   8158 
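         	/*
         	 * Worked example (illustrative): for a TSOv4 send with
         	 * 14 + 20 + 20 bytes of headers, PAYLEN = m0->m_pkthdr.len - 54,
         	 * and with MSS = 1460 and a 20-byte TCP header,
         	 * mssidx = (1460 << NQTXC_MSSIDX_MSS_SHIFT) |
         	 * (20 << NQTXC_MSSIDX_L4LEN_SHIFT).
         	 */
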
   8159 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8160 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8161 		cmdc |= NQTXC_CMD_IP4;
   8162 	}
   8163 
   8164 	if (m0->m_pkthdr.csum_flags &
   8165 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8166 		WM_Q_EVCNT_INCR(txq, tusum);
   8167 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8168 			cmdc |= NQTXC_CMD_TCP;
   8169 		else
   8170 			cmdc |= NQTXC_CMD_UDP;
   8171 
   8172 		cmdc |= NQTXC_CMD_IP4;
   8173 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8174 	}
   8175 	if (m0->m_pkthdr.csum_flags &
   8176 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8177 		WM_Q_EVCNT_INCR(txq, tusum6);
   8178 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8179 			cmdc |= NQTXC_CMD_TCP;
   8180 		else
   8181 			cmdc |= NQTXC_CMD_UDP;
   8182 
   8183 		cmdc |= NQTXC_CMD_IP6;
   8184 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8185 	}
   8186 
   8187 	/*
    8188 	 * On NEWQUEUE controllers (82575, 82576, 82580, I350, I354,
    8189 	 * I210 and I211), we don't have to write a context descriptor
    8190 	 * for every packet; writing one per Tx queue is enough for
    8191 	 * these controllers.
    8192 	 * Writing a context descriptor for every packet does add
    8193 	 * overhead, but it does not cause problems.
   8194 	 */
   8195 	/* Fill in the context descriptor. */
    8196 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
    8197 	    htole32(vl_len);
    8198 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
    8199 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
    8200 	    htole32(cmdc);
    8201 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
    8202 	    htole32(mssidx);
   8203 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8204 	DPRINTF(WM_DEBUG_TX,
   8205 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8206 		txq->txq_next, 0, vl_len));
   8207 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8208 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8209 	txs->txs_ndesc++;
   8210 }
   8211 
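         /*
          * For illustration: an offloaded packet therefore occupies its DMA
          * segment count plus one ring slot: the context descriptor written
          * above, followed by the advanced data descriptors filled in by
          * wm_nq_send_common_locked().  A two-segment TCPv4 packet with
          * checksum offload uses three descriptors in total.
          */
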
   8212 /*
   8213  * wm_nq_start:		[ifnet interface function]
   8214  *
   8215  *	Start packet transmission on the interface for NEWQUEUE devices
   8216  */
   8217 static void
   8218 wm_nq_start(struct ifnet *ifp)
   8219 {
   8220 	struct wm_softc *sc = ifp->if_softc;
   8221 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8222 
   8223 #ifdef WM_MPSAFE
   8224 	KASSERT(if_is_mpsafe(ifp));
   8225 #endif
   8226 	/*
   8227 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8228 	 */
   8229 
   8230 	mutex_enter(txq->txq_lock);
   8231 	if (!txq->txq_stopping)
   8232 		wm_nq_start_locked(ifp);
   8233 	mutex_exit(txq->txq_lock);
   8234 }
   8235 
   8236 static void
   8237 wm_nq_start_locked(struct ifnet *ifp)
   8238 {
   8239 	struct wm_softc *sc = ifp->if_softc;
   8240 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8241 
   8242 	wm_nq_send_common_locked(ifp, txq, false);
   8243 }
   8244 
   8245 static int
   8246 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8247 {
   8248 	int qid;
   8249 	struct wm_softc *sc = ifp->if_softc;
   8250 	struct wm_txqueue *txq;
   8251 
   8252 	qid = wm_select_txqueue(ifp, m);
   8253 	txq = &sc->sc_queue[qid].wmq_txq;
   8254 
   8255 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8256 		m_freem(m);
   8257 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8258 		return ENOBUFS;
   8259 	}
   8260 
   8261 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8262 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8263 	if (m->m_flags & M_MCAST)
   8264 		if_statinc_ref(nsr, if_omcasts);
   8265 	IF_STAT_PUTREF(ifp);
   8266 
   8267 	/*
    8268 	 * There are two situations in which this mutex_tryenter() can
    8269 	 * fail at run time:
    8270 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8271 	 *     (2) contention with the deferred if_start softint
    8272 	 *         (wm_handle_queue())
    8273 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8274 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8275 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    8276 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck, either.
   8277 	 */
   8278 	if (mutex_tryenter(txq->txq_lock)) {
   8279 		if (!txq->txq_stopping)
   8280 			wm_nq_transmit_locked(ifp, txq);
   8281 		mutex_exit(txq->txq_lock);
   8282 	}
   8283 
   8284 	return 0;
   8285 }
   8286 
   8287 static void
   8288 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8289 {
   8290 
   8291 	wm_nq_send_common_locked(ifp, txq, true);
   8292 }
   8293 
   8294 static void
   8295 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8296     bool is_transmit)
   8297 {
   8298 	struct wm_softc *sc = ifp->if_softc;
   8299 	struct mbuf *m0;
   8300 	struct wm_txsoft *txs;
   8301 	bus_dmamap_t dmamap;
   8302 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8303 	bool do_csum, sent;
   8304 	bool remap = true;
   8305 
   8306 	KASSERT(mutex_owned(txq->txq_lock));
   8307 
   8308 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8309 		return;
   8310 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8311 		return;
   8312 
   8313 	sent = false;
   8314 
   8315 	/*
   8316 	 * Loop through the send queue, setting up transmit descriptors
   8317 	 * until we drain the queue, or use up all available transmit
   8318 	 * descriptors.
   8319 	 */
   8320 	for (;;) {
   8321 		m0 = NULL;
   8322 
   8323 		/* Get a work queue entry. */
   8324 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8325 			wm_txeof(txq, UINT_MAX);
   8326 			if (txq->txq_sfree == 0) {
   8327 				DPRINTF(WM_DEBUG_TX,
   8328 				    ("%s: TX: no free job descriptors\n",
   8329 					device_xname(sc->sc_dev)));
   8330 				WM_Q_EVCNT_INCR(txq, txsstall);
   8331 				break;
   8332 			}
   8333 		}
   8334 
   8335 		/* Grab a packet off the queue. */
   8336 		if (is_transmit)
   8337 			m0 = pcq_get(txq->txq_interq);
   8338 		else
   8339 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8340 		if (m0 == NULL)
   8341 			break;
   8342 
   8343 		DPRINTF(WM_DEBUG_TX,
   8344 		    ("%s: TX: have packet to transmit: %p\n",
   8345 		    device_xname(sc->sc_dev), m0));
   8346 
   8347 		txs = &txq->txq_soft[txq->txq_snext];
   8348 		dmamap = txs->txs_dmamap;
   8349 
   8350 		/*
   8351 		 * Load the DMA map.  If this fails, the packet either
   8352 		 * didn't fit in the allotted number of segments, or we
   8353 		 * were short on resources.  For the too-many-segments
   8354 		 * case, we simply report an error and drop the packet,
   8355 		 * since we can't sanely copy a jumbo packet to a single
   8356 		 * buffer.
   8357 		 */
   8358 retry:
   8359 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8360 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8361 		if (__predict_false(error)) {
   8362 			if (error == EFBIG) {
   8363 				if (remap == true) {
   8364 					struct mbuf *m;
   8365 
   8366 					remap = false;
   8367 					m = m_defrag(m0, M_NOWAIT);
   8368 					if (m != NULL) {
   8369 						WM_Q_EVCNT_INCR(txq, defrag);
   8370 						m0 = m;
   8371 						goto retry;
   8372 					}
   8373 				}
   8374 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8375 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8376 				    "DMA segments, dropping...\n",
   8377 				    device_xname(sc->sc_dev));
   8378 				wm_dump_mbuf_chain(sc, m0);
   8379 				m_freem(m0);
   8380 				continue;
   8381 			}
   8382 			/* Short on resources, just stop for now. */
   8383 			DPRINTF(WM_DEBUG_TX,
   8384 			    ("%s: TX: dmamap load failed: %d\n",
   8385 				device_xname(sc->sc_dev), error));
   8386 			break;
   8387 		}
   8388 
   8389 		segs_needed = dmamap->dm_nsegs;
   8390 
   8391 		/*
   8392 		 * Ensure we have enough descriptors free to describe
   8393 		 * the packet. Note, we always reserve one descriptor
   8394 		 * at the end of the ring due to the semantics of the
   8395 		 * TDT register, plus one more in the event we need
   8396 		 * to load offload context.
   8397 		 */
   8398 		if (segs_needed > txq->txq_free - 2) {
   8399 			/*
   8400 			 * Not enough free descriptors to transmit this
   8401 			 * packet.  We haven't committed anything yet,
    8402 			 * so just unload the DMA map, drop the packet
    8403 			 * (it is freed after the loop), and notify the
    8404 			 * upper layer that there are no more slots left.
   8405 			 */
   8406 			DPRINTF(WM_DEBUG_TX,
   8407 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8408 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8409 				segs_needed, txq->txq_free - 1));
   8410 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8411 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8412 			WM_Q_EVCNT_INCR(txq, txdstall);
   8413 			break;
   8414 		}
   8415 
   8416 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8417 
   8418 		DPRINTF(WM_DEBUG_TX,
   8419 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8420 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8421 
   8422 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8423 
   8424 		/*
   8425 		 * Store a pointer to the packet so that we can free it
   8426 		 * later.
   8427 		 *
   8428 		 * Initially, we consider the number of descriptors the
    8429 		 * packet uses to be the number of DMA segments.  This may be
   8430 		 * incremented by 1 if we do checksum offload (a descriptor
   8431 		 * is used to set the checksum context).
   8432 		 */
   8433 		txs->txs_mbuf = m0;
   8434 		txs->txs_firstdesc = txq->txq_next;
   8435 		txs->txs_ndesc = segs_needed;
   8436 
   8437 		/* Set up offload parameters for this packet. */
   8438 		uint32_t cmdlen, fields, dcmdlen;
   8439 		if (m0->m_pkthdr.csum_flags &
   8440 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8441 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8442 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8443 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8444 			    &do_csum);
   8445 		} else {
   8446 			do_csum = false;
   8447 			cmdlen = 0;
   8448 			fields = 0;
   8449 		}
   8450 
   8451 		/* Sync the DMA map. */
   8452 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8453 		    BUS_DMASYNC_PREWRITE);
   8454 
   8455 		/* Initialize the first transmit descriptor. */
   8456 		nexttx = txq->txq_next;
   8457 		if (!do_csum) {
   8458 			/* Setup a legacy descriptor */
   8459 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8460 			    dmamap->dm_segs[0].ds_addr);
   8461 			txq->txq_descs[nexttx].wtx_cmdlen =
   8462 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8463 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8464 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8465 			if (vlan_has_tag(m0)) {
   8466 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8467 				    htole32(WTX_CMD_VLE);
   8468 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8469 				    htole16(vlan_get_tag(m0));
   8470 			} else
    8471 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8472 
   8473 			dcmdlen = 0;
   8474 		} else {
   8475 			/* Setup an advanced data descriptor */
   8476 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8477 			    htole64(dmamap->dm_segs[0].ds_addr);
   8478 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8479 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8480 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8481 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8482 			    htole32(fields);
   8483 			DPRINTF(WM_DEBUG_TX,
   8484 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8485 				device_xname(sc->sc_dev), nexttx,
   8486 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8487 			DPRINTF(WM_DEBUG_TX,
   8488 			    ("\t 0x%08x%08x\n", fields,
   8489 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8490 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8491 		}
   8492 
   8493 		lasttx = nexttx;
   8494 		nexttx = WM_NEXTTX(txq, nexttx);
   8495 		/*
    8496 		 * Fill in the next descriptors.  The legacy and advanced
    8497 		 * formats are the same from here on.
   8498 		 */
   8499 		for (seg = 1; seg < dmamap->dm_nsegs;
   8500 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8501 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8502 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8503 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8504 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8505 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8506 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8507 			lasttx = nexttx;
   8508 
   8509 			DPRINTF(WM_DEBUG_TX,
   8510 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8511 				device_xname(sc->sc_dev), nexttx,
   8512 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8513 				dmamap->dm_segs[seg].ds_len));
   8514 		}
   8515 
   8516 		KASSERT(lasttx != -1);
   8517 
   8518 		/*
   8519 		 * Set up the command byte on the last descriptor of
   8520 		 * the packet. If we're in the interrupt delay window,
   8521 		 * delay the interrupt.
   8522 		 */
   8523 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8524 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8525 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8526 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8527 
   8528 		txs->txs_lastdesc = lasttx;
   8529 
   8530 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8531 		    device_xname(sc->sc_dev),
   8532 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8533 
   8534 		/* Sync the descriptors we're using. */
   8535 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8536 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8537 
   8538 		/* Give the packet to the chip. */
   8539 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8540 		sent = true;
   8541 
   8542 		DPRINTF(WM_DEBUG_TX,
   8543 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8544 
   8545 		DPRINTF(WM_DEBUG_TX,
   8546 		    ("%s: TX: finished transmitting packet, job %d\n",
   8547 			device_xname(sc->sc_dev), txq->txq_snext));
   8548 
   8549 		/* Advance the tx pointer. */
   8550 		txq->txq_free -= txs->txs_ndesc;
   8551 		txq->txq_next = nexttx;
   8552 
   8553 		txq->txq_sfree--;
   8554 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8555 
   8556 		/* Pass the packet to any BPF listeners. */
   8557 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8558 	}
   8559 
   8560 	if (m0 != NULL) {
   8561 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8562 		WM_Q_EVCNT_INCR(txq, descdrop);
   8563 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8564 			__func__));
   8565 		m_freem(m0);
   8566 	}
   8567 
   8568 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8569 		/* No more slots; notify upper layer. */
   8570 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8571 	}
   8572 
   8573 	if (sent) {
   8574 		/* Set a watchdog timer in case the chip flakes out. */
   8575 		txq->txq_lastsent = time_uptime;
   8576 		txq->txq_sending = true;
   8577 	}
   8578 }
   8579 
   8580 static void
   8581 wm_deferred_start_locked(struct wm_txqueue *txq)
   8582 {
   8583 	struct wm_softc *sc = txq->txq_sc;
   8584 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8585 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8586 	int qid = wmq->wmq_id;
   8587 
   8588 	KASSERT(mutex_owned(txq->txq_lock));
   8589 
    8590 	if (txq->txq_stopping)
    8591 		return;
   8594 
   8595 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8596 		/* XXX needed for ALTQ or single-CPU systems */
   8597 		if (qid == 0)
   8598 			wm_nq_start_locked(ifp);
   8599 		wm_nq_transmit_locked(ifp, txq);
   8600 	} else {
    8601 		/* XXX needed for ALTQ or single-CPU systems */
   8602 		if (qid == 0)
   8603 			wm_start_locked(ifp);
   8604 		wm_transmit_locked(ifp, txq);
   8605 	}
   8606 }
   8607 
   8608 /* Interrupt */
   8609 
   8610 /*
   8611  * wm_txeof:
   8612  *
   8613  *	Helper; handle transmit interrupts.
   8614  */
   8615 static bool
   8616 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8617 {
   8618 	struct wm_softc *sc = txq->txq_sc;
   8619 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8620 	struct wm_txsoft *txs;
   8621 	int count = 0;
   8622 	int i;
   8623 	uint8_t status;
   8624 	bool more = false;
   8625 
   8626 	KASSERT(mutex_owned(txq->txq_lock));
   8627 
   8628 	if (txq->txq_stopping)
   8629 		return false;
   8630 
   8631 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8632 
   8633 	/*
   8634 	 * Go through the Tx list and free mbufs for those
   8635 	 * frames which have been transmitted.
   8636 	 */
   8637 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8638 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8639 		if (limit-- == 0) {
   8640 			more = true;
   8641 			DPRINTF(WM_DEBUG_TX,
   8642 			    ("%s: TX: loop limited, job %d is not processed\n",
   8643 				device_xname(sc->sc_dev), i));
   8644 			break;
   8645 		}
   8646 
   8647 		txs = &txq->txq_soft[i];
   8648 
   8649 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8650 			device_xname(sc->sc_dev), i));
   8651 
   8652 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8653 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8654 
   8655 		status =
   8656 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8657 		if ((status & WTX_ST_DD) == 0) {
   8658 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8659 			    BUS_DMASYNC_PREREAD);
   8660 			break;
   8661 		}
   8662 
   8663 		count++;
   8664 		DPRINTF(WM_DEBUG_TX,
   8665 		    ("%s: TX: job %d done: descs %d..%d\n",
   8666 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8667 		    txs->txs_lastdesc));
   8668 
   8669 		/*
   8670 		 * XXX We should probably be using the statistics
   8671 		 * XXX registers, but I don't know if they exist
   8672 		 * XXX on chips before the i82544.
   8673 		 */
   8674 
   8675 #ifdef WM_EVENT_COUNTERS
   8676 		if (status & WTX_ST_TU)
   8677 			WM_Q_EVCNT_INCR(txq, underrun);
   8678 #endif /* WM_EVENT_COUNTERS */
   8679 
   8680 		/*
    8681 		 * The documents for 82574 and newer say the status field has
    8682 		 * neither the EC (Excessive Collision) bit nor the LC (Late
    8683 		 * Collision) bit (both are reserved).  See the "PCIe GbE
    8684 		 * Controller Open Source Software Developer's Manual", the
    8685 		 * 82574 datasheet and newer ones.
    8686 		 *
    8687 		 * XXX I saw the LC bit set on an I218 even though the media was full
    8688 		 * duplex, so the bit might have another meaning (I have no document).
   8689 		 */
   8690 
   8691 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8692 		    && ((sc->sc_type < WM_T_82574)
   8693 			|| (sc->sc_type == WM_T_80003))) {
   8694 			if_statinc(ifp, if_oerrors);
   8695 			if (status & WTX_ST_LC)
   8696 				log(LOG_WARNING, "%s: late collision\n",
   8697 				    device_xname(sc->sc_dev));
   8698 			else if (status & WTX_ST_EC) {
   8699 				if_statadd(ifp, if_collisions,
   8700 				    TX_COLLISION_THRESHOLD + 1);
   8701 				log(LOG_WARNING, "%s: excessive collisions\n",
   8702 				    device_xname(sc->sc_dev));
   8703 			}
   8704 		} else
   8705 			if_statinc(ifp, if_opackets);
   8706 
   8707 		txq->txq_packets++;
   8708 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8709 
   8710 		txq->txq_free += txs->txs_ndesc;
   8711 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8712 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8713 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8714 		m_freem(txs->txs_mbuf);
   8715 		txs->txs_mbuf = NULL;
   8716 	}
   8717 
   8718 	/* Update the dirty transmit buffer pointer. */
   8719 	txq->txq_sdirty = i;
   8720 	DPRINTF(WM_DEBUG_TX,
   8721 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8722 
   8723 	/*
   8724 	 * If there are no more pending transmissions, cancel the watchdog
   8725 	 * timer.
   8726 	 */
   8727 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8728 		txq->txq_sending = false;
   8729 
   8730 	return more;
   8731 }
   8732 
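         /*
          * Completion-scan sketch (illustrative): jobs are reclaimed in
          * order.  For a job spanning descriptors 10..12, only descriptor 12
          * (txs_lastdesc, which was written with WTX_CMD_RS) is checked for
          * WTX_ST_DD; once the chip sets DD there, all three descriptors are
          * known to be done and the job's mbuf and DMA map can be released.
          */
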
   8733 static inline uint32_t
   8734 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8735 {
   8736 	struct wm_softc *sc = rxq->rxq_sc;
   8737 
   8738 	if (sc->sc_type == WM_T_82574)
   8739 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8740 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8741 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8742 	else
   8743 		return rxq->rxq_descs[idx].wrx_status;
   8744 }
   8745 
   8746 static inline uint32_t
   8747 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8748 {
   8749 	struct wm_softc *sc = rxq->rxq_sc;
   8750 
   8751 	if (sc->sc_type == WM_T_82574)
   8752 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8753 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8754 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8755 	else
   8756 		return rxq->rxq_descs[idx].wrx_errors;
   8757 }
   8758 
   8759 static inline uint16_t
   8760 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8761 {
   8762 	struct wm_softc *sc = rxq->rxq_sc;
   8763 
   8764 	if (sc->sc_type == WM_T_82574)
   8765 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8766 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8767 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8768 	else
   8769 		return rxq->rxq_descs[idx].wrx_special;
   8770 }
   8771 
   8772 static inline int
   8773 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8774 {
   8775 	struct wm_softc *sc = rxq->rxq_sc;
   8776 
   8777 	if (sc->sc_type == WM_T_82574)
   8778 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8779 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8780 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8781 	else
   8782 		return rxq->rxq_descs[idx].wrx_len;
   8783 }
   8784 
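         /*
          * The helpers above hide three receive descriptor layouts behind
          * one interface: the legacy wiseman descriptor (wrx_*), the 82574
          * extended descriptor (erx_ctx.erxc_*) and the NEWQUEUE advanced
          * descriptor (nqrx_ctx.nrxc_*).  Every helper dispatches the same
          * way: 82574 first, then WM_F_NEWQUEUE, then legacy.
          */
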
   8785 #ifdef WM_DEBUG
   8786 static inline uint32_t
   8787 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8788 {
   8789 	struct wm_softc *sc = rxq->rxq_sc;
   8790 
   8791 	if (sc->sc_type == WM_T_82574)
   8792 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8793 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8794 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8795 	else
   8796 		return 0;
   8797 }
   8798 
   8799 static inline uint8_t
   8800 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8801 {
   8802 	struct wm_softc *sc = rxq->rxq_sc;
   8803 
   8804 	if (sc->sc_type == WM_T_82574)
   8805 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8806 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8807 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8808 	else
   8809 		return 0;
   8810 }
   8811 #endif /* WM_DEBUG */
   8812 
   8813 static inline bool
   8814 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8815     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8816 {
   8817 
   8818 	if (sc->sc_type == WM_T_82574)
   8819 		return (status & ext_bit) != 0;
   8820 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8821 		return (status & nq_bit) != 0;
   8822 	else
   8823 		return (status & legacy_bit) != 0;
   8824 }
   8825 
   8826 static inline bool
   8827 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8828     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8829 {
   8830 
   8831 	if (sc->sc_type == WM_T_82574)
   8832 		return (error & ext_bit) != 0;
   8833 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8834 		return (error & nq_bit) != 0;
   8835 	else
   8836 		return (error & legacy_bit) != 0;
   8837 }
   8838 
   8839 static inline bool
   8840 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8841 {
   8842 
   8843 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8844 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8845 		return true;
   8846 	else
   8847 		return false;
   8848 }
   8849 
   8850 static inline bool
   8851 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8852 {
   8853 	struct wm_softc *sc = rxq->rxq_sc;
   8854 
   8855 	/* XXX missing error bit for newqueue? */
   8856 	if (wm_rxdesc_is_set_error(sc, errors,
   8857 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8858 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8859 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8860 		NQRXC_ERROR_RXE)) {
   8861 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8862 		    EXTRXC_ERROR_SE, 0))
   8863 			log(LOG_WARNING, "%s: symbol error\n",
   8864 			    device_xname(sc->sc_dev));
   8865 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8866 		    EXTRXC_ERROR_SEQ, 0))
   8867 			log(LOG_WARNING, "%s: receive sequence error\n",
   8868 			    device_xname(sc->sc_dev));
   8869 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8870 		    EXTRXC_ERROR_CE, 0))
   8871 			log(LOG_WARNING, "%s: CRC error\n",
   8872 			    device_xname(sc->sc_dev));
   8873 		return true;
   8874 	}
   8875 
   8876 	return false;
   8877 }
   8878 
   8879 static inline bool
   8880 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8881 {
   8882 	struct wm_softc *sc = rxq->rxq_sc;
   8883 
   8884 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8885 		NQRXC_STATUS_DD)) {
   8886 		/* We have processed all of the receive descriptors. */
   8887 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8888 		return false;
   8889 	}
   8890 
   8891 	return true;
   8892 }
   8893 
   8894 static inline bool
   8895 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8896     uint16_t vlantag, struct mbuf *m)
   8897 {
   8898 
   8899 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8900 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8901 		vlan_set_tag(m, le16toh(vlantag));
   8902 	}
   8903 
   8904 	return true;
   8905 }
   8906 
   8907 static inline void
   8908 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8909     uint32_t errors, struct mbuf *m)
   8910 {
   8911 	struct wm_softc *sc = rxq->rxq_sc;
   8912 
   8913 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8914 		if (wm_rxdesc_is_set_status(sc, status,
   8915 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8916 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8917 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8918 			if (wm_rxdesc_is_set_error(sc, errors,
   8919 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8920 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8921 		}
   8922 		if (wm_rxdesc_is_set_status(sc, status,
   8923 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8924 			/*
   8925 			 * Note: we don't know if this was TCP or UDP,
   8926 			 * so we just set both bits, and expect the
   8927 			 * upper layers to deal.
   8928 			 */
   8929 			WM_Q_EVCNT_INCR(rxq, tusum);
   8930 			m->m_pkthdr.csum_flags |=
   8931 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8932 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8933 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8934 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8935 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8936 		}
   8937 	}
   8938 }
   8939 
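         /*
          * Example (illustrative): a good TCPv4 packet whose descriptor has
          * the IPCS and TCPCS status bits set and no error bits ends up with
          * M_CSUM_IPv4 plus all four of the TCP/UDP v4/v6 flags in
          * csum_flags, and the upper layers can skip software checksum
          * verification accordingly.
          */
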
   8940 /*
   8941  * wm_rxeof:
   8942  *
   8943  *	Helper; handle receive interrupts.
   8944  */
   8945 static bool
   8946 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8947 {
   8948 	struct wm_softc *sc = rxq->rxq_sc;
   8949 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8950 	struct wm_rxsoft *rxs;
   8951 	struct mbuf *m;
   8952 	int i, len;
   8953 	int count = 0;
   8954 	uint32_t status, errors;
   8955 	uint16_t vlantag;
   8956 	bool more = false;
   8957 
   8958 	KASSERT(mutex_owned(rxq->rxq_lock));
   8959 
   8960 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8961 		if (limit-- == 0) {
   8962 			rxq->rxq_ptr = i;
   8963 			more = true;
   8964 			DPRINTF(WM_DEBUG_RX,
   8965 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8966 				device_xname(sc->sc_dev), i));
   8967 			break;
   8968 		}
   8969 
   8970 		rxs = &rxq->rxq_soft[i];
   8971 
   8972 		DPRINTF(WM_DEBUG_RX,
   8973 		    ("%s: RX: checking descriptor %d\n",
   8974 			device_xname(sc->sc_dev), i));
   8975 		wm_cdrxsync(rxq, i,
   8976 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8977 
   8978 		status = wm_rxdesc_get_status(rxq, i);
   8979 		errors = wm_rxdesc_get_errors(rxq, i);
   8980 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8981 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8982 #ifdef WM_DEBUG
   8983 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8984 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8985 #endif
   8986 
   8987 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8988 			/*
    8989 			 * Update the receive pointer while holding rxq_lock,
    8990 			 * keeping it consistent with the counter updates.
   8991 			 */
   8992 			rxq->rxq_ptr = i;
   8993 			break;
   8994 		}
   8995 
   8996 		count++;
   8997 		if (__predict_false(rxq->rxq_discard)) {
   8998 			DPRINTF(WM_DEBUG_RX,
   8999 			    ("%s: RX: discarding contents of descriptor %d\n",
   9000 				device_xname(sc->sc_dev), i));
   9001 			wm_init_rxdesc(rxq, i);
   9002 			if (wm_rxdesc_is_eop(rxq, status)) {
   9003 				/* Reset our state. */
   9004 				DPRINTF(WM_DEBUG_RX,
   9005 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9006 					device_xname(sc->sc_dev)));
   9007 				rxq->rxq_discard = 0;
   9008 			}
   9009 			continue;
   9010 		}
   9011 
   9012 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9013 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9014 
   9015 		m = rxs->rxs_mbuf;
   9016 
   9017 		/*
   9018 		 * Add a new receive buffer to the ring, unless of
   9019 		 * course the length is zero. Treat the latter as a
   9020 		 * failed mapping.
   9021 		 */
   9022 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9023 			/*
   9024 			 * Failed, throw away what we've done so
   9025 			 * far, and discard the rest of the packet.
   9026 			 */
   9027 			if_statinc(ifp, if_ierrors);
   9028 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9029 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9030 			wm_init_rxdesc(rxq, i);
   9031 			if (!wm_rxdesc_is_eop(rxq, status))
   9032 				rxq->rxq_discard = 1;
   9033 			if (rxq->rxq_head != NULL)
   9034 				m_freem(rxq->rxq_head);
   9035 			WM_RXCHAIN_RESET(rxq);
   9036 			DPRINTF(WM_DEBUG_RX,
   9037 			    ("%s: RX: Rx buffer allocation failed, "
   9038 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9039 				rxq->rxq_discard ? " (discard)" : ""));
   9040 			continue;
   9041 		}
   9042 
   9043 		m->m_len = len;
   9044 		rxq->rxq_len += len;
   9045 		DPRINTF(WM_DEBUG_RX,
   9046 		    ("%s: RX: buffer at %p len %d\n",
   9047 			device_xname(sc->sc_dev), m->m_data, len));
   9048 
   9049 		/* If this is not the end of the packet, keep looking. */
   9050 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9051 			WM_RXCHAIN_LINK(rxq, m);
   9052 			DPRINTF(WM_DEBUG_RX,
   9053 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9054 				device_xname(sc->sc_dev), rxq->rxq_len));
   9055 			continue;
   9056 		}
   9057 
   9058 		/*
    9059 		 * Okay, we have the entire packet now.  The chip is
    9060 		 * configured to include the FCS except on I350 and I21[01]
    9061 		 * (not all chips can be configured to strip it), so we
    9062 		 * need to trim it.  We may also need to adjust the length
    9063 		 * of the previous mbuf in the chain if the current mbuf
    9064 		 * is too short.
    9065 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    9066 		 * register is always set on I350, so we don't trim there.
   9067 		 */
   9068 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   9069 		    && (sc->sc_type != WM_T_I210)
   9070 		    && (sc->sc_type != WM_T_I211)) {
   9071 			if (m->m_len < ETHER_CRC_LEN) {
   9072 				rxq->rxq_tail->m_len
   9073 				    -= (ETHER_CRC_LEN - m->m_len);
   9074 				m->m_len = 0;
   9075 			} else
   9076 				m->m_len -= ETHER_CRC_LEN;
   9077 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9078 		} else
   9079 			len = rxq->rxq_len;
   9080 
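         		/*
         		 * Trimming example (illustrative): if the FCS straddles
         		 * mbufs so that the last mbuf holds only 2 of its 4
         		 * bytes, the other 2 bytes are removed by shortening
         		 * rxq_tail and the last mbuf's length drops to 0.
         		 */
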
   9081 		WM_RXCHAIN_LINK(rxq, m);
   9082 
   9083 		*rxq->rxq_tailp = NULL;
   9084 		m = rxq->rxq_head;
   9085 
   9086 		WM_RXCHAIN_RESET(rxq);
   9087 
   9088 		DPRINTF(WM_DEBUG_RX,
   9089 		    ("%s: RX: have entire packet, len -> %d\n",
   9090 			device_xname(sc->sc_dev), len));
   9091 
   9092 		/* If an error occurred, update stats and drop the packet. */
   9093 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9094 			m_freem(m);
   9095 			continue;
   9096 		}
   9097 
   9098 		/* No errors.  Receive the packet. */
   9099 		m_set_rcvif(m, ifp);
   9100 		m->m_pkthdr.len = len;
   9101 		/*
    9102 		 * TODO:
    9103 		 * We should save the rsshash and rsstype in this mbuf.
   9104 		 */
   9105 		DPRINTF(WM_DEBUG_RX,
   9106 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9107 			device_xname(sc->sc_dev), rsstype, rsshash));
   9108 
   9109 		/*
   9110 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9111 		 * for us.  Associate the tag with the packet.
   9112 		 */
   9113 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9114 			continue;
   9115 
   9116 		/* Set up checksum info for this packet. */
   9117 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9118 		/*
    9119 		 * Update the receive pointer while holding rxq_lock, keeping
    9120 		 * it consistent with the counter updates.
   9121 		 */
   9122 		rxq->rxq_ptr = i;
   9123 		rxq->rxq_packets++;
   9124 		rxq->rxq_bytes += len;
   9125 		mutex_exit(rxq->rxq_lock);
   9126 
   9127 		/* Pass it on. */
   9128 		if_percpuq_enqueue(sc->sc_ipq, m);
   9129 
   9130 		mutex_enter(rxq->rxq_lock);
   9131 
   9132 		if (rxq->rxq_stopping)
   9133 			break;
   9134 	}
   9135 
   9136 	DPRINTF(WM_DEBUG_RX,
   9137 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9138 
   9139 	return more;
   9140 }
   9141 
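         /*
          * Re-entry sketch (illustrative): when the "limit" budget runs out,
          * rxq_ptr records how far this pass got and wm_rxeof() returns true
          * so that the caller can schedule another softint pass; the next
          * call resumes at rxq_ptr as if the loop had never been
          * interrupted.
          */
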
   9142 /*
   9143  * wm_linkintr_gmii:
   9144  *
   9145  *	Helper; handle link interrupts for GMII.
   9146  */
   9147 static void
   9148 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9149 {
   9150 	device_t dev = sc->sc_dev;
   9151 	uint32_t status, reg;
   9152 	bool link;
   9153 	int rv;
   9154 
   9155 	KASSERT(WM_CORE_LOCKED(sc));
   9156 
   9157 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9158 		__func__));
   9159 
   9160 	if ((icr & ICR_LSC) == 0) {
   9161 		if (icr & ICR_RXSEQ)
   9162 			DPRINTF(WM_DEBUG_LINK,
   9163 			    ("%s: LINK Receive sequence error\n",
   9164 				device_xname(dev)));
   9165 		return;
   9166 	}
   9167 
   9168 	/* Link status changed */
   9169 	status = CSR_READ(sc, WMREG_STATUS);
   9170 	link = status & STATUS_LU;
   9171 	if (link) {
   9172 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9173 			device_xname(dev),
   9174 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9175 	} else {
   9176 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9177 			device_xname(dev)));
   9178 	}
   9179 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9180 		wm_gig_downshift_workaround_ich8lan(sc);
   9181 
   9182 	if ((sc->sc_type == WM_T_ICH8)
   9183 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9184 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9185 	}
   9186 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9187 		device_xname(dev)));
   9188 	mii_pollstat(&sc->sc_mii);
   9189 	if (sc->sc_type == WM_T_82543) {
   9190 		int miistatus, active;
   9191 
   9192 		/*
    9193 		 * With the 82543, we need to force the MAC's
    9194 		 * speed and duplex settings to match the
    9195 		 * PHY's configuration.
   9196 		 */
   9197 		miistatus = sc->sc_mii.mii_media_status;
   9198 
   9199 		if (miistatus & IFM_ACTIVE) {
   9200 			active = sc->sc_mii.mii_media_active;
   9201 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9202 			switch (IFM_SUBTYPE(active)) {
   9203 			case IFM_10_T:
   9204 				sc->sc_ctrl |= CTRL_SPEED_10;
   9205 				break;
   9206 			case IFM_100_TX:
   9207 				sc->sc_ctrl |= CTRL_SPEED_100;
   9208 				break;
   9209 			case IFM_1000_T:
   9210 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9211 				break;
   9212 			default:
   9213 				/*
   9214 				 * Fiber?
    9215 				 * Should not get here.
   9216 				 */
   9217 				device_printf(dev, "unknown media (%x)\n",
   9218 				    active);
   9219 				break;
   9220 			}
   9221 			if (active & IFM_FDX)
   9222 				sc->sc_ctrl |= CTRL_FD;
   9223 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9224 		}
   9225 	} else if (sc->sc_type == WM_T_PCH) {
   9226 		wm_k1_gig_workaround_hv(sc,
   9227 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9228 	}
   9229 
   9230 	/*
   9231 	 * When connected at 10Mbps half-duplex, some parts are excessively
    9232 	 * aggressive, resulting in many collisions.  To avoid this, increase
   9233 	 * the IPG and reduce Rx latency in the PHY.
   9234 	 */
   9235 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9236 	    && link) {
   9237 		uint32_t tipg_reg;
   9238 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9239 		bool fdx;
   9240 		uint16_t emi_addr, emi_val;
   9241 
   9242 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9243 		tipg_reg &= ~TIPG_IPGT_MASK;
   9244 		fdx = status & STATUS_FD;
   9245 
   9246 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9247 			tipg_reg |= 0xff;
   9248 			/* Reduce Rx latency in analog PHY */
   9249 			emi_val = 0;
   9250 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9251 		    fdx && speed != STATUS_SPEED_1000) {
   9252 			tipg_reg |= 0xc;
   9253 			emi_val = 1;
   9254 		} else {
   9255 			/* Roll back the default values */
   9256 			tipg_reg |= 0x08;
   9257 			emi_val = 1;
   9258 		}
   9259 
   9260 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9261 
   9262 		rv = sc->phy.acquire(sc);
   9263 		if (rv)
   9264 			return;
   9265 
   9266 		if (sc->sc_type == WM_T_PCH2)
   9267 			emi_addr = I82579_RX_CONFIG;
   9268 		else
   9269 			emi_addr = I217_RX_CONFIG;
   9270 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9271 
   9272 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9273 			uint16_t phy_reg;
   9274 
   9275 			sc->phy.readreg_locked(dev, 2,
   9276 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9277 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9278 			if (speed == STATUS_SPEED_100
   9279 			    || speed == STATUS_SPEED_10)
   9280 				phy_reg |= 0x3e8;
   9281 			else
   9282 				phy_reg |= 0xfa;
   9283 			sc->phy.writereg_locked(dev, 2,
   9284 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9285 
   9286 			if (speed == STATUS_SPEED_1000) {
   9287 				sc->phy.readreg_locked(dev, 2,
   9288 				    HV_PM_CTRL, &phy_reg);
   9289 
   9290 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9291 
   9292 				sc->phy.writereg_locked(dev, 2,
   9293 				    HV_PM_CTRL, phy_reg);
   9294 			}
   9295 		}
   9296 		sc->phy.release(sc);
   9297 
   9298 		if (rv)
   9299 			return;
   9300 
   9301 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9302 			uint16_t data, ptr_gap;
   9303 
   9304 			if (speed == STATUS_SPEED_1000) {
   9305 				rv = sc->phy.acquire(sc);
   9306 				if (rv)
   9307 					return;
   9308 
   9309 				rv = sc->phy.readreg_locked(dev, 2,
   9310 				    I219_UNKNOWN1, &data);
   9311 				if (rv) {
   9312 					sc->phy.release(sc);
   9313 					return;
   9314 				}
   9315 
   9316 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9317 				if (ptr_gap < 0x18) {
   9318 					data &= ~(0x3ff << 2);
   9319 					data |= (0x18 << 2);
   9320 					rv = sc->phy.writereg_locked(dev,
   9321 					    2, I219_UNKNOWN1, data);
   9322 				}
   9323 				sc->phy.release(sc);
   9324 				if (rv)
   9325 					return;
   9326 			} else {
   9327 				rv = sc->phy.acquire(sc);
   9328 				if (rv)
   9329 					return;
   9330 
   9331 				rv = sc->phy.writereg_locked(dev, 2,
   9332 				    I219_UNKNOWN1, 0xc023);
   9333 				sc->phy.release(sc);
   9334 				if (rv)
   9335 					return;
   9336 
   9337 			}
   9338 		}
   9339 	}
   9340 
   9341 	/*
   9342 	 * I217 Packet Loss issue:
    9343 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9344 	 * on power up.
    9345 	 * Set the Beacon Duration for I217 to 8 usec.
   9346 	 */
   9347 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9348 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9349 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9350 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9351 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9352 	}
   9353 
    9354 	/* Work around the I218 hang issue */
   9355 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9356 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9357 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9358 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9359 		wm_k1_workaround_lpt_lp(sc, link);
   9360 
   9361 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9362 		/*
   9363 		 * Set platform power management values for Latency
   9364 		 * Tolerance Reporting (LTR)
   9365 		 */
   9366 		wm_platform_pm_pch_lpt(sc,
   9367 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9368 	}
   9369 
   9370 	/* Clear link partner's EEE ability */
   9371 	sc->eee_lp_ability = 0;
   9372 
   9373 	/* FEXTNVM6 K1-off workaround */
   9374 	if (sc->sc_type == WM_T_PCH_SPT) {
   9375 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9376 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9377 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9378 		else
   9379 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9380 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9381 	}
   9382 
   9383 	if (!link)
   9384 		return;
   9385 
   9386 	switch (sc->sc_type) {
   9387 	case WM_T_PCH2:
   9388 		wm_k1_workaround_lv(sc);
   9389 		/* FALLTHROUGH */
   9390 	case WM_T_PCH:
   9391 		if (sc->sc_phytype == WMPHY_82578)
   9392 			wm_link_stall_workaround_hv(sc);
   9393 		break;
   9394 	default:
   9395 		break;
   9396 	}
   9397 
   9398 	/* Enable/Disable EEE after link up */
   9399 	if (sc->sc_phytype > WMPHY_82579)
   9400 		wm_set_eee_pchlan(sc);
   9401 }
   9402 
   9403 /*
   9404  * wm_linkintr_tbi:
   9405  *
   9406  *	Helper; handle link interrupts for TBI mode.
   9407  */
   9408 static void
   9409 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9410 {
   9411 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9412 	uint32_t status;
   9413 
   9414 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9415 		__func__));
   9416 
   9417 	status = CSR_READ(sc, WMREG_STATUS);
   9418 	if (icr & ICR_LSC) {
   9419 		wm_check_for_link(sc);
   9420 		if (status & STATUS_LU) {
   9421 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9422 				device_xname(sc->sc_dev),
   9423 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9424 			/*
   9425 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9426 			 * so we should update sc->sc_ctrl
   9427 			 */
   9428 
   9429 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9430 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9431 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9432 			if (status & STATUS_FD)
   9433 				sc->sc_tctl |=
   9434 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9435 			else
   9436 				sc->sc_tctl |=
   9437 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9438 			if (sc->sc_ctrl & CTRL_TFCE)
   9439 				sc->sc_fcrtl |= FCRTL_XONE;
   9440 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9441 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9442 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9443 			sc->sc_tbi_linkup = 1;
   9444 			if_link_state_change(ifp, LINK_STATE_UP);
   9445 		} else {
   9446 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9447 				device_xname(sc->sc_dev)));
   9448 			sc->sc_tbi_linkup = 0;
   9449 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9450 		}
   9451 		/* Update LED */
   9452 		wm_tbi_serdes_set_linkled(sc);
   9453 	} else if (icr & ICR_RXSEQ)
   9454 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9455 			device_xname(sc->sc_dev)));
   9456 }
   9457 
   9458 /*
   9459  * wm_linkintr_serdes:
   9460  *
    9461  *	Helper; handle link interrupts for SERDES mode.
   9462  */
   9463 static void
   9464 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9465 {
   9466 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9467 	struct mii_data *mii = &sc->sc_mii;
   9468 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9469 	uint32_t pcs_adv, pcs_lpab, reg;
   9470 
   9471 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9472 		__func__));
   9473 
   9474 	if (icr & ICR_LSC) {
   9475 		/* Check PCS */
   9476 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9477 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9478 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9479 				device_xname(sc->sc_dev)));
   9480 			mii->mii_media_status |= IFM_ACTIVE;
   9481 			sc->sc_tbi_linkup = 1;
   9482 			if_link_state_change(ifp, LINK_STATE_UP);
   9483 		} else {
   9484 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9485 				device_xname(sc->sc_dev)));
   9486 			mii->mii_media_status |= IFM_NONE;
   9487 			sc->sc_tbi_linkup = 0;
   9488 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9489 			wm_tbi_serdes_set_linkled(sc);
   9490 			return;
   9491 		}
   9492 		mii->mii_media_active |= IFM_1000_SX;
   9493 		if ((reg & PCS_LSTS_FDX) != 0)
   9494 			mii->mii_media_active |= IFM_FDX;
   9495 		else
   9496 			mii->mii_media_active |= IFM_HDX;
   9497 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9498 			/* Check flow */
   9499 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9500 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9501 				DPRINTF(WM_DEBUG_LINK,
   9502 				    ("XXX LINKOK but not ACOMP\n"));
   9503 				return;
   9504 			}
   9505 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9506 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9507 			DPRINTF(WM_DEBUG_LINK,
   9508 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9509 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9510 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9511 				mii->mii_media_active |= IFM_FLOW
   9512 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9513 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9514 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9515 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9516 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9517 				mii->mii_media_active |= IFM_FLOW
   9518 				    | IFM_ETH_TXPAUSE;
   9519 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9520 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9521 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9522 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9523 				mii->mii_media_active |= IFM_FLOW
   9524 				    | IFM_ETH_RXPAUSE;
   9525 		}
   9526 		/* Update LED */
   9527 		wm_tbi_serdes_set_linkled(sc);
   9528 	} else
   9529 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9530 		    device_xname(sc->sc_dev)));
   9531 }
   9532 
   9533 /*
   9534  * wm_linkintr:
   9535  *
   9536  *	Helper; handle link interrupts.
   9537  */
   9538 static void
   9539 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9540 {
   9541 
   9542 	KASSERT(WM_CORE_LOCKED(sc));
   9543 
   9544 	if (sc->sc_flags & WM_F_HAS_MII)
   9545 		wm_linkintr_gmii(sc, icr);
   9546 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9547 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9548 		wm_linkintr_serdes(sc, icr);
   9549 	else
   9550 		wm_linkintr_tbi(sc, icr);
   9551 }
   9552 
   9553 
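/*
 * wm_sched_handle_queue:
 *
 *	Schedule deferred Tx/Rx processing for a queue, either on the
 *	per-device workqueue or as a softint, depending on the cached
 *	wmq_txrx_use_workqueue setting.
 */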
   9554 static inline void
   9555 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9556 {
   9557 
   9558 	if (wmq->wmq_txrx_use_workqueue)
   9559 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9560 	else
   9561 		softint_schedule(wmq->wmq_si);
   9562 }
   9563 
   9564 /*
   9565  * wm_intr_legacy:
   9566  *
   9567  *	Interrupt service routine for INTx and MSI.
   9568  */
   9569 static int
   9570 wm_intr_legacy(void *arg)
   9571 {
   9572 	struct wm_softc *sc = arg;
   9573 	struct wm_queue *wmq = &sc->sc_queue[0];
   9574 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9575 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9576 	uint32_t icr, rndval = 0;
   9577 	int handled = 0;
   9578 
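	/*
	 * Reading ICR acknowledges (clears) the asserted interrupt
	 * causes, so loop until no cause we care about remains set.
	 */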
   9579 	while (1 /* CONSTCOND */) {
   9580 		icr = CSR_READ(sc, WMREG_ICR);
   9581 		if ((icr & sc->sc_icr) == 0)
   9582 			break;
   9583 		if (handled == 0)
   9584 			DPRINTF(WM_DEBUG_TX,
   9585 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9586 		if (rndval == 0)
   9587 			rndval = icr;
   9588 
   9589 		mutex_enter(rxq->rxq_lock);
   9590 
   9591 		if (rxq->rxq_stopping) {
   9592 			mutex_exit(rxq->rxq_lock);
   9593 			break;
   9594 		}
   9595 
   9596 		handled = 1;
   9597 
   9598 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9599 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9600 			DPRINTF(WM_DEBUG_RX,
   9601 			    ("%s: RX: got Rx intr 0x%08x\n",
   9602 				device_xname(sc->sc_dev),
   9603 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9604 			WM_Q_EVCNT_INCR(rxq, intr);
   9605 		}
   9606 #endif
   9607 		/*
   9608 		 * wm_rxeof() does *not* call upper layer functions directly,
    9609 		 * as if_percpuq_enqueue() merely calls softint_schedule().
    9610 		 * So we can call wm_rxeof() in interrupt context.
   9611 		 */
   9612 		wm_rxeof(rxq, UINT_MAX);
   9613 		/* Fill lower bits with RX index. See below for the upper. */
   9614 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9615 
   9616 		mutex_exit(rxq->rxq_lock);
   9617 		mutex_enter(txq->txq_lock);
   9618 
   9619 		if (txq->txq_stopping) {
   9620 			mutex_exit(txq->txq_lock);
   9621 			break;
   9622 		}
   9623 
   9624 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9625 		if (icr & ICR_TXDW) {
   9626 			DPRINTF(WM_DEBUG_TX,
   9627 			    ("%s: TX: got TXDW interrupt\n",
   9628 				device_xname(sc->sc_dev)));
   9629 			WM_Q_EVCNT_INCR(txq, txdw);
   9630 		}
   9631 #endif
   9632 		wm_txeof(txq, UINT_MAX);
   9633 		/* Fill upper bits with TX index. See above for the lower. */
    9634 		rndval |= txq->txq_next * WM_NRXDESC;
   9635 
   9636 		mutex_exit(txq->txq_lock);
   9637 		WM_CORE_LOCK(sc);
   9638 
   9639 		if (sc->sc_core_stopping) {
   9640 			WM_CORE_UNLOCK(sc);
   9641 			break;
   9642 		}
   9643 
   9644 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9645 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9646 			wm_linkintr(sc, icr);
   9647 		}
   9648 		if ((icr & ICR_GPI(0)) != 0)
   9649 			device_printf(sc->sc_dev, "got module interrupt\n");
   9650 
   9651 		WM_CORE_UNLOCK(sc);
   9652 
   9653 		if (icr & ICR_RXO) {
   9654 #if defined(WM_DEBUG)
   9655 			log(LOG_WARNING, "%s: Receive overrun\n",
   9656 			    device_xname(sc->sc_dev));
   9657 #endif /* defined(WM_DEBUG) */
   9658 		}
   9659 	}
   9660 
   9661 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9662 
   9663 	if (handled) {
   9664 		/* Try to get more packets going. */
   9665 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9666 		wm_sched_handle_queue(sc, wmq);
   9667 	}
   9668 
   9669 	return handled;
   9670 }
   9671 
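/*
 * wm_txrxintr_disable:
 *
 *	Mask the Tx/Rx interrupts of the given queue: via IMC on the
 *	82574, via the per-queue EIMC bits on the 82575, and via the
 *	EIMC bit indexed by the MSI-X vector on newer chips.
 */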
   9672 static inline void
   9673 wm_txrxintr_disable(struct wm_queue *wmq)
   9674 {
   9675 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9676 
   9677 	if (sc->sc_type == WM_T_82574)
   9678 		CSR_WRITE(sc, WMREG_IMC,
   9679 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9680 	else if (sc->sc_type == WM_T_82575)
   9681 		CSR_WRITE(sc, WMREG_EIMC,
   9682 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9683 	else
   9684 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9685 }
   9686 
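/*
 * wm_txrxintr_enable:
 *
 *	Recalculate the interrupt throttling rate and unmask the Tx/Rx
 *	interrupts of the given queue, mirroring wm_txrxintr_disable().
 */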
   9687 static inline void
   9688 wm_txrxintr_enable(struct wm_queue *wmq)
   9689 {
   9690 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9691 
   9692 	wm_itrs_calculate(sc, wmq);
   9693 
   9694 	/*
    9695 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    9696 	 * here.  It does not matter whether RXQ(0) or RXQ(1) enables
    9697 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9698 	 * while its wm_handle_queue(wmq) is running.
   9699 	 */
   9700 	if (sc->sc_type == WM_T_82574)
   9701 		CSR_WRITE(sc, WMREG_IMS,
   9702 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9703 	else if (sc->sc_type == WM_T_82575)
   9704 		CSR_WRITE(sc, WMREG_EIMS,
   9705 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9706 	else
   9707 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9708 }
   9709 
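/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx interrupts of a queue
 *	pair for MSI-X.
 */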
   9710 static int
   9711 wm_txrxintr_msix(void *arg)
   9712 {
   9713 	struct wm_queue *wmq = arg;
   9714 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9715 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9716 	struct wm_softc *sc = txq->txq_sc;
   9717 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9718 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9719 	uint32_t rndval = 0;
   9720 	bool txmore;
   9721 	bool rxmore;
   9722 
   9723 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9724 
   9725 	DPRINTF(WM_DEBUG_TX,
   9726 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9727 
   9728 	wm_txrxintr_disable(wmq);
   9729 
   9730 	mutex_enter(txq->txq_lock);
   9731 
   9732 	if (txq->txq_stopping) {
   9733 		mutex_exit(txq->txq_lock);
   9734 		return 0;
   9735 	}
   9736 
   9737 	WM_Q_EVCNT_INCR(txq, txdw);
   9738 	txmore = wm_txeof(txq, txlimit);
   9739 	/* Fill upper bits with TX index. See below for the lower. */
   9740 	rndval = txq->txq_next * WM_NRXDESC;
    9741 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9742 	mutex_exit(txq->txq_lock);
   9743 
   9744 	DPRINTF(WM_DEBUG_RX,
   9745 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9746 	mutex_enter(rxq->rxq_lock);
   9747 
   9748 	if (rxq->rxq_stopping) {
   9749 		mutex_exit(rxq->rxq_lock);
   9750 		return 0;
   9751 	}
   9752 
   9753 	WM_Q_EVCNT_INCR(rxq, intr);
   9754 	rxmore = wm_rxeof(rxq, rxlimit);
   9755 
   9756 	/* Fill lower bits with RX index. See above for the upper. */
   9757 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9758 	mutex_exit(rxq->rxq_lock);
   9759 
   9760 	wm_itrs_writereg(sc, wmq);
   9761 
   9762 	/*
    9763 	 * This function is called in hardware interrupt context and the
    9764 	 * vector is per-CPU, so it's not required to take a lock.
   9765 	 */
   9766 	if (rndval != 0)
   9767 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9768 
   9769 	if (txmore || rxmore) {
   9770 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9771 		wm_sched_handle_queue(sc, wmq);
   9772 	} else
   9773 		wm_txrxintr_enable(wmq);
   9774 
   9775 	return 1;
   9776 }
   9777 
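/*
 * wm_handle_queue:
 *
 *	Softint/workqueue handler to process the Tx/Rx descriptors left
 *	over when the interrupt handler hit its process limits.
 */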
   9778 static void
   9779 wm_handle_queue(void *arg)
   9780 {
   9781 	struct wm_queue *wmq = arg;
   9782 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9783 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9784 	struct wm_softc *sc = txq->txq_sc;
   9785 	u_int txlimit = sc->sc_tx_process_limit;
   9786 	u_int rxlimit = sc->sc_rx_process_limit;
   9787 	bool txmore;
   9788 	bool rxmore;
   9789 
   9790 	mutex_enter(txq->txq_lock);
   9791 	if (txq->txq_stopping) {
   9792 		mutex_exit(txq->txq_lock);
   9793 		return;
   9794 	}
   9795 	txmore = wm_txeof(txq, txlimit);
   9796 	wm_deferred_start_locked(txq);
   9797 	mutex_exit(txq->txq_lock);
   9798 
   9799 	mutex_enter(rxq->rxq_lock);
   9800 	if (rxq->rxq_stopping) {
   9801 		mutex_exit(rxq->rxq_lock);
   9802 		return;
   9803 	}
   9804 	WM_Q_EVCNT_INCR(rxq, defer);
   9805 	rxmore = wm_rxeof(rxq, rxlimit);
   9806 	mutex_exit(rxq->rxq_lock);
   9807 
   9808 	if (txmore || rxmore) {
   9809 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9810 		wm_sched_handle_queue(sc, wmq);
   9811 	} else
   9812 		wm_txrxintr_enable(wmq);
   9813 }
   9814 
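/*
 * wm_handle_queue_work:
 *
 *	Workqueue wrapper around wm_handle_queue().
 */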
   9815 static void
   9816 wm_handle_queue_work(struct work *wk, void *context)
   9817 {
   9818 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9819 
   9820 	/*
   9821 	 * "enqueued flag" is not required here.
   9822 	 */
   9823 	wm_handle_queue(wmq);
   9824 }
   9825 
   9826 /*
   9827  * wm_linkintr_msix:
   9828  *
   9829  *	Interrupt service routine for link status change for MSI-X.
   9830  */
   9831 static int
   9832 wm_linkintr_msix(void *arg)
   9833 {
   9834 	struct wm_softc *sc = arg;
   9835 	uint32_t reg;
   9836 	bool has_rxo;
   9837 
   9838 	reg = CSR_READ(sc, WMREG_ICR);
   9839 	WM_CORE_LOCK(sc);
   9840 	DPRINTF(WM_DEBUG_LINK,
   9841 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9842 		device_xname(sc->sc_dev), reg));
   9843 
   9844 	if (sc->sc_core_stopping)
   9845 		goto out;
   9846 
   9847 	if ((reg & ICR_LSC) != 0) {
   9848 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9849 		wm_linkintr(sc, ICR_LSC);
   9850 	}
   9851 	if ((reg & ICR_GPI(0)) != 0)
   9852 		device_printf(sc->sc_dev, "got module interrupt\n");
   9853 
   9854 	/*
   9855 	 * XXX 82574 MSI-X mode workaround
   9856 	 *
    9857 	 * In MSI-X mode, the 82574 reports a receive overrun (RXO) on the
    9858 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
    9859 	 * ICR_RXQ(1) vector.  So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9860 	 * interrupts by writing WMREG_ICS to process received packets.
   9861 	 */
   9862 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9863 #if defined(WM_DEBUG)
   9864 		log(LOG_WARNING, "%s: Receive overrun\n",
   9865 		    device_xname(sc->sc_dev));
   9866 #endif /* defined(WM_DEBUG) */
   9867 
   9868 		has_rxo = true;
   9869 		/*
    9870 		 * The RXO interrupt fires at a very high rate under heavy
    9871 		 * receive traffic, so handle ICR_OTHER in polling mode like
    9872 		 * the Tx/Rx interrupts.  ICR_OTHER will be re-enabled at the
    9873 		 * end of wm_txrxintr_msix(), which is kicked by both the
    9874 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9875 		 */
   9876 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9877 
   9878 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9879 	}
    9880 
   9883 out:
   9884 	WM_CORE_UNLOCK(sc);
   9885 
   9886 	if (sc->sc_type == WM_T_82574) {
   9887 		if (!has_rxo)
   9888 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9889 		else
   9890 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9891 	} else if (sc->sc_type == WM_T_82575)
   9892 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9893 	else
   9894 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9895 
   9896 	return 1;
   9897 }
   9898 
   9899 /*
   9900  * Media related.
   9901  * GMII, SGMII, TBI (and SERDES)
   9902  */
   9903 
   9904 /* Common */
   9905 
   9906 /*
   9907  * wm_tbi_serdes_set_linkled:
   9908  *
   9909  *	Update the link LED on TBI and SERDES devices.
   9910  */
   9911 static void
   9912 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9913 {
   9914 
   9915 	if (sc->sc_tbi_linkup)
   9916 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9917 	else
   9918 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9919 
    9920 	/* The LED pin is active-low on 82540 and newer devices */
   9921 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9922 
   9923 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9924 }
   9925 
   9926 /* GMII related */
   9927 
   9928 /*
   9929  * wm_gmii_reset:
   9930  *
   9931  *	Reset the PHY.
   9932  */
   9933 static void
   9934 wm_gmii_reset(struct wm_softc *sc)
   9935 {
   9936 	uint32_t reg;
   9937 	int rv;
   9938 
   9939 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9940 		device_xname(sc->sc_dev), __func__));
   9941 
   9942 	rv = sc->phy.acquire(sc);
   9943 	if (rv != 0) {
   9944 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9945 		    __func__);
   9946 		return;
   9947 	}
   9948 
   9949 	switch (sc->sc_type) {
   9950 	case WM_T_82542_2_0:
   9951 	case WM_T_82542_2_1:
   9952 		/* null */
   9953 		break;
   9954 	case WM_T_82543:
   9955 		/*
   9956 		 * With 82543, we need to force speed and duplex on the MAC
   9957 		 * equal to what the PHY speed and duplex configuration is.
   9958 		 * In addition, we need to perform a hardware reset on the PHY
   9959 		 * to take it out of reset.
   9960 		 */
   9961 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9962 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9963 
   9964 		/* The PHY reset pin is active-low. */
   9965 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9966 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9967 		    CTRL_EXT_SWDPIN(4));
   9968 		reg |= CTRL_EXT_SWDPIO(4);
   9969 
   9970 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9971 		CSR_WRITE_FLUSH(sc);
   9972 		delay(10*1000);
   9973 
   9974 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9975 		CSR_WRITE_FLUSH(sc);
   9976 		delay(150);
   9977 #if 0
   9978 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9979 #endif
   9980 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9981 		break;
   9982 	case WM_T_82544:	/* Reset 10000us */
   9983 	case WM_T_82540:
   9984 	case WM_T_82545:
   9985 	case WM_T_82545_3:
   9986 	case WM_T_82546:
   9987 	case WM_T_82546_3:
   9988 	case WM_T_82541:
   9989 	case WM_T_82541_2:
   9990 	case WM_T_82547:
   9991 	case WM_T_82547_2:
   9992 	case WM_T_82571:	/* Reset 100us */
   9993 	case WM_T_82572:
   9994 	case WM_T_82573:
   9995 	case WM_T_82574:
   9996 	case WM_T_82575:
   9997 	case WM_T_82576:
   9998 	case WM_T_82580:
   9999 	case WM_T_I350:
   10000 	case WM_T_I354:
   10001 	case WM_T_I210:
   10002 	case WM_T_I211:
   10003 	case WM_T_82583:
   10004 	case WM_T_80003:
   10005 		/* Generic reset */
   10006 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10007 		CSR_WRITE_FLUSH(sc);
   10008 		delay(20000);
   10009 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10010 		CSR_WRITE_FLUSH(sc);
   10011 		delay(20000);
   10012 
   10013 		if ((sc->sc_type == WM_T_82541)
   10014 		    || (sc->sc_type == WM_T_82541_2)
   10015 		    || (sc->sc_type == WM_T_82547)
   10016 		    || (sc->sc_type == WM_T_82547_2)) {
    10017 			/* Workarounds for IGP are done in igp_reset() */
   10018 			/* XXX add code to set LED after phy reset */
   10019 		}
   10020 		break;
   10021 	case WM_T_ICH8:
   10022 	case WM_T_ICH9:
   10023 	case WM_T_ICH10:
   10024 	case WM_T_PCH:
   10025 	case WM_T_PCH2:
   10026 	case WM_T_PCH_LPT:
   10027 	case WM_T_PCH_SPT:
   10028 	case WM_T_PCH_CNP:
   10029 		/* Generic reset */
   10030 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10031 		CSR_WRITE_FLUSH(sc);
   10032 		delay(100);
   10033 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10034 		CSR_WRITE_FLUSH(sc);
   10035 		delay(150);
   10036 		break;
   10037 	default:
   10038 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10039 		    __func__);
   10040 		break;
   10041 	}
   10042 
   10043 	sc->phy.release(sc);
   10044 
   10045 	/* get_cfg_done */
   10046 	wm_get_cfg_done(sc);
   10047 
   10048 	/* Extra setup */
   10049 	switch (sc->sc_type) {
   10050 	case WM_T_82542_2_0:
   10051 	case WM_T_82542_2_1:
   10052 	case WM_T_82543:
   10053 	case WM_T_82544:
   10054 	case WM_T_82540:
   10055 	case WM_T_82545:
   10056 	case WM_T_82545_3:
   10057 	case WM_T_82546:
   10058 	case WM_T_82546_3:
   10059 	case WM_T_82541_2:
   10060 	case WM_T_82547_2:
   10061 	case WM_T_82571:
   10062 	case WM_T_82572:
   10063 	case WM_T_82573:
   10064 	case WM_T_82574:
   10065 	case WM_T_82583:
   10066 	case WM_T_82575:
   10067 	case WM_T_82576:
   10068 	case WM_T_82580:
   10069 	case WM_T_I350:
   10070 	case WM_T_I354:
   10071 	case WM_T_I210:
   10072 	case WM_T_I211:
   10073 	case WM_T_80003:
   10074 		/* Null */
   10075 		break;
   10076 	case WM_T_82541:
   10077 	case WM_T_82547:
    10078 		/* XXX Actively configure the LED after PHY reset */
   10079 		break;
   10080 	case WM_T_ICH8:
   10081 	case WM_T_ICH9:
   10082 	case WM_T_ICH10:
   10083 	case WM_T_PCH:
   10084 	case WM_T_PCH2:
   10085 	case WM_T_PCH_LPT:
   10086 	case WM_T_PCH_SPT:
   10087 	case WM_T_PCH_CNP:
   10088 		wm_phy_post_reset(sc);
   10089 		break;
   10090 	default:
   10091 		panic("%s: unknown type\n", __func__);
   10092 		break;
   10093 	}
   10094 }
   10095 
   10096 /*
    10097  * Set up sc_phytype and mii_{read|write}reg.
    10098  *
    10099  *  To identify the PHY type, the correct read/write functions must be
    10100  * selected, and selecting them requires the PCI ID or the MAC type,
    10101  * since the PHY registers cannot be accessed yet.
    10102  *
    10103  *  On the first call of this function, the PHY ID is not known yet, so
    10104  * check the PCI ID or the MAC type.  The list of PCI IDs may not be
    10105  * complete, so the result might be incorrect.
    10106  *
    10107  *  On the second call, the PHY OUI and model are used to identify the
    10108  * PHY type.  This might still be imperfect because of missing table
    10109  * entries, but it is better than the first call.
    10110  *
    10111  *  If the newly detected result differs from the previous assumption,
    10112  * a diagnostic message is printed.
   10113  */
   10114 static void
   10115 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10116     uint16_t phy_model)
   10117 {
   10118 	device_t dev = sc->sc_dev;
   10119 	struct mii_data *mii = &sc->sc_mii;
   10120 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10121 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10122 	mii_readreg_t new_readreg;
   10123 	mii_writereg_t new_writereg;
   10124 	bool dodiag = true;
   10125 
   10126 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10127 		device_xname(sc->sc_dev), __func__));
   10128 
   10129 	/*
    10130 	 * A 1000BASE-T SFP uses SGMII and the initially assumed PHY type is
    10131 	 * always incorrect, so don't print diag output on the second call.
   10132 	 */
   10133 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10134 		dodiag = false;
   10135 
   10136 	if (mii->mii_readreg == NULL) {
   10137 		/*
   10138 		 *  This is the first call of this function. For ICH and PCH
   10139 		 * variants, it's difficult to determine the PHY access method
   10140 		 * by sc_type, so use the PCI product ID for some devices.
   10141 		 */
   10142 
   10143 		switch (sc->sc_pcidevid) {
   10144 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10145 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10146 			/* 82577 */
   10147 			new_phytype = WMPHY_82577;
   10148 			break;
   10149 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10150 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10151 			/* 82578 */
   10152 			new_phytype = WMPHY_82578;
   10153 			break;
   10154 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10155 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10156 			/* 82579 */
   10157 			new_phytype = WMPHY_82579;
   10158 			break;
   10159 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10160 		case PCI_PRODUCT_INTEL_82801I_BM:
   10161 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10162 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10163 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10164 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10165 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10166 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10167 			/* ICH8, 9, 10 with 82567 */
   10168 			new_phytype = WMPHY_BM;
   10169 			break;
   10170 		default:
   10171 			break;
   10172 		}
   10173 	} else {
   10174 		/* It's not the first call. Use PHY OUI and model */
   10175 		switch (phy_oui) {
   10176 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10177 			switch (phy_model) {
   10178 			case 0x0004: /* XXX */
   10179 				new_phytype = WMPHY_82578;
   10180 				break;
   10181 			default:
   10182 				break;
   10183 			}
   10184 			break;
   10185 		case MII_OUI_xxMARVELL:
   10186 			switch (phy_model) {
   10187 			case MII_MODEL_xxMARVELL_I210:
   10188 				new_phytype = WMPHY_I210;
   10189 				break;
   10190 			case MII_MODEL_xxMARVELL_E1011:
   10191 			case MII_MODEL_xxMARVELL_E1000_3:
   10192 			case MII_MODEL_xxMARVELL_E1000_5:
   10193 			case MII_MODEL_xxMARVELL_E1112:
   10194 				new_phytype = WMPHY_M88;
   10195 				break;
   10196 			case MII_MODEL_xxMARVELL_E1149:
   10197 				new_phytype = WMPHY_BM;
   10198 				break;
   10199 			case MII_MODEL_xxMARVELL_E1111:
   10200 			case MII_MODEL_xxMARVELL_I347:
   10201 			case MII_MODEL_xxMARVELL_E1512:
   10202 			case MII_MODEL_xxMARVELL_E1340M:
   10203 			case MII_MODEL_xxMARVELL_E1543:
   10204 				new_phytype = WMPHY_M88;
   10205 				break;
   10206 			case MII_MODEL_xxMARVELL_I82563:
   10207 				new_phytype = WMPHY_GG82563;
   10208 				break;
   10209 			default:
   10210 				break;
   10211 			}
   10212 			break;
   10213 		case MII_OUI_INTEL:
   10214 			switch (phy_model) {
   10215 			case MII_MODEL_INTEL_I82577:
   10216 				new_phytype = WMPHY_82577;
   10217 				break;
   10218 			case MII_MODEL_INTEL_I82579:
   10219 				new_phytype = WMPHY_82579;
   10220 				break;
   10221 			case MII_MODEL_INTEL_I217:
   10222 				new_phytype = WMPHY_I217;
   10223 				break;
   10224 			case MII_MODEL_INTEL_I82580:
   10225 			case MII_MODEL_INTEL_I350:
   10226 				new_phytype = WMPHY_82580;
   10227 				break;
   10228 			default:
   10229 				break;
   10230 			}
   10231 			break;
   10232 		case MII_OUI_yyINTEL:
   10233 			switch (phy_model) {
   10234 			case MII_MODEL_yyINTEL_I82562G:
   10235 			case MII_MODEL_yyINTEL_I82562EM:
   10236 			case MII_MODEL_yyINTEL_I82562ET:
   10237 				new_phytype = WMPHY_IFE;
   10238 				break;
   10239 			case MII_MODEL_yyINTEL_IGP01E1000:
   10240 				new_phytype = WMPHY_IGP;
   10241 				break;
   10242 			case MII_MODEL_yyINTEL_I82566:
   10243 				new_phytype = WMPHY_IGP_3;
   10244 				break;
   10245 			default:
   10246 				break;
   10247 			}
   10248 			break;
   10249 		default:
   10250 			break;
   10251 		}
   10252 
   10253 		if (dodiag) {
   10254 			if (new_phytype == WMPHY_UNKNOWN)
   10255 				aprint_verbose_dev(dev,
   10256 				    "%s: Unknown PHY model. OUI=%06x, "
   10257 				    "model=%04x\n", __func__, phy_oui,
   10258 				    phy_model);
   10259 
   10260 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10261 			    && (sc->sc_phytype != new_phytype)) {
   10262 				aprint_error_dev(dev, "Previously assumed PHY "
   10263 				    "type(%u) was incorrect. PHY type from PHY"
   10264 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10265 			}
   10266 		}
   10267 	}
   10268 
   10269 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10270 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10271 		/* SGMII */
   10272 		new_readreg = wm_sgmii_readreg;
   10273 		new_writereg = wm_sgmii_writereg;
   10274 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10275 		/* BM2 (phyaddr == 1) */
   10276 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10277 		    && (new_phytype != WMPHY_BM)
   10278 		    && (new_phytype != WMPHY_UNKNOWN))
   10279 			doubt_phytype = new_phytype;
   10280 		new_phytype = WMPHY_BM;
   10281 		new_readreg = wm_gmii_bm_readreg;
   10282 		new_writereg = wm_gmii_bm_writereg;
   10283 	} else if (sc->sc_type >= WM_T_PCH) {
   10284 		/* All PCH* use _hv_ */
   10285 		new_readreg = wm_gmii_hv_readreg;
   10286 		new_writereg = wm_gmii_hv_writereg;
   10287 	} else if (sc->sc_type >= WM_T_ICH8) {
   10288 		/* non-82567 ICH8, 9 and 10 */
   10289 		new_readreg = wm_gmii_i82544_readreg;
   10290 		new_writereg = wm_gmii_i82544_writereg;
   10291 	} else if (sc->sc_type >= WM_T_80003) {
   10292 		/* 80003 */
   10293 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10294 		    && (new_phytype != WMPHY_GG82563)
   10295 		    && (new_phytype != WMPHY_UNKNOWN))
   10296 			doubt_phytype = new_phytype;
   10297 		new_phytype = WMPHY_GG82563;
   10298 		new_readreg = wm_gmii_i80003_readreg;
   10299 		new_writereg = wm_gmii_i80003_writereg;
   10300 	} else if (sc->sc_type >= WM_T_I210) {
   10301 		/* I210 and I211 */
   10302 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10303 		    && (new_phytype != WMPHY_I210)
   10304 		    && (new_phytype != WMPHY_UNKNOWN))
   10305 			doubt_phytype = new_phytype;
   10306 		new_phytype = WMPHY_I210;
   10307 		new_readreg = wm_gmii_gs40g_readreg;
   10308 		new_writereg = wm_gmii_gs40g_writereg;
   10309 	} else if (sc->sc_type >= WM_T_82580) {
   10310 		/* 82580, I350 and I354 */
   10311 		new_readreg = wm_gmii_82580_readreg;
   10312 		new_writereg = wm_gmii_82580_writereg;
   10313 	} else if (sc->sc_type >= WM_T_82544) {
    10314 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10315 		new_readreg = wm_gmii_i82544_readreg;
   10316 		new_writereg = wm_gmii_i82544_writereg;
   10317 	} else {
   10318 		new_readreg = wm_gmii_i82543_readreg;
   10319 		new_writereg = wm_gmii_i82543_writereg;
   10320 	}
   10321 
   10322 	if (new_phytype == WMPHY_BM) {
   10323 		/* All BM use _bm_ */
   10324 		new_readreg = wm_gmii_bm_readreg;
   10325 		new_writereg = wm_gmii_bm_writereg;
   10326 	}
   10327 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10328 		/* All PCH* use _hv_ */
   10329 		new_readreg = wm_gmii_hv_readreg;
   10330 		new_writereg = wm_gmii_hv_writereg;
   10331 	}
   10332 
   10333 	/* Diag output */
   10334 	if (dodiag) {
   10335 		if (doubt_phytype != WMPHY_UNKNOWN)
   10336 			aprint_error_dev(dev, "Assumed new PHY type was "
   10337 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10338 			    new_phytype);
   10339 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10340 		    && (sc->sc_phytype != new_phytype))
   10341 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   10342 			    "was incorrect. New PHY type = %u\n",
   10343 			    sc->sc_phytype, new_phytype);
   10344 
   10345 		if ((mii->mii_readreg != NULL) &&
   10346 		    (new_phytype == WMPHY_UNKNOWN))
   10347 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10348 
   10349 		if ((mii->mii_readreg != NULL) &&
   10350 		    (mii->mii_readreg != new_readreg))
   10351 			aprint_error_dev(dev, "Previously assumed PHY "
   10352 			    "read/write function was incorrect.\n");
   10353 	}
   10354 
   10355 	/* Update now */
   10356 	sc->sc_phytype = new_phytype;
   10357 	mii->mii_readreg = new_readreg;
   10358 	mii->mii_writereg = new_writereg;
   10359 	if (new_readreg == wm_gmii_hv_readreg) {
   10360 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10361 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10362 	} else if (new_readreg == wm_sgmii_readreg) {
   10363 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10364 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10365 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10366 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10367 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10368 	}
   10369 }
   10370 
   10371 /*
   10372  * wm_get_phy_id_82575:
   10373  *
   10374  * Return PHY ID. Return -1 if it failed.
   10375  */
   10376 static int
   10377 wm_get_phy_id_82575(struct wm_softc *sc)
   10378 {
   10379 	uint32_t reg;
   10380 	int phyid = -1;
   10381 
   10382 	/* XXX */
   10383 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10384 		return -1;
   10385 
   10386 	if (wm_sgmii_uses_mdio(sc)) {
   10387 		switch (sc->sc_type) {
   10388 		case WM_T_82575:
   10389 		case WM_T_82576:
   10390 			reg = CSR_READ(sc, WMREG_MDIC);
   10391 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10392 			break;
   10393 		case WM_T_82580:
   10394 		case WM_T_I350:
   10395 		case WM_T_I354:
   10396 		case WM_T_I210:
   10397 		case WM_T_I211:
   10398 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10399 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10400 			break;
   10401 		default:
   10402 			return -1;
   10403 		}
   10404 	}
   10405 
   10406 	return phyid;
   10407 }
    10408 
   10410 /*
   10411  * wm_gmii_mediainit:
   10412  *
   10413  *	Initialize media for use on 1000BASE-T devices.
   10414  */
   10415 static void
   10416 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10417 {
   10418 	device_t dev = sc->sc_dev;
   10419 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10420 	struct mii_data *mii = &sc->sc_mii;
   10421 
   10422 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10423 		device_xname(sc->sc_dev), __func__));
   10424 
   10425 	/* We have GMII. */
   10426 	sc->sc_flags |= WM_F_HAS_MII;
   10427 
   10428 	if (sc->sc_type == WM_T_80003)
    10429 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10430 	else
   10431 		sc->sc_tipg = TIPG_1000T_DFLT;
   10432 
   10433 	/*
   10434 	 * Let the chip set speed/duplex on its own based on
   10435 	 * signals from the PHY.
   10436 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10437 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10438 	 */
   10439 	sc->sc_ctrl |= CTRL_SLU;
   10440 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10441 
   10442 	/* Initialize our media structures and probe the GMII. */
   10443 	mii->mii_ifp = ifp;
   10444 
   10445 	mii->mii_statchg = wm_gmii_statchg;
   10446 
    10447 	/* Move PHY control from SMBus to PCIe */
   10448 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10449 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10450 	    || (sc->sc_type == WM_T_PCH_CNP))
   10451 		wm_init_phy_workarounds_pchlan(sc);
   10452 
   10453 	wm_gmii_reset(sc);
   10454 
   10455 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10456 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10457 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10458 
   10459 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10460 	    || (sc->sc_type == WM_T_82580)
   10461 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10462 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10463 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10464 			/* Attach only one port */
   10465 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10466 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10467 		} else {
   10468 			int i, id;
   10469 			uint32_t ctrl_ext;
   10470 
   10471 			id = wm_get_phy_id_82575(sc);
   10472 			if (id != -1) {
   10473 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10474 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10475 			}
   10476 			if ((id == -1)
   10477 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10478 				/* Power on the SGMII PHY if it is disabled */
    10479 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
    10480 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    10481 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   10482 				CSR_WRITE_FLUSH(sc);
   10483 				delay(300*1000); /* XXX too long */
   10484 
   10485 				/*
    10486 				 * Scan PHY addresses from 1 to 7.
    10487 				 *
    10488 				 * I2C access fails with the I2C register's
    10489 				 * ERROR bit set, so suppress error messages
    10490 				 * while scanning.
   10491 				 */
   10492 				sc->phy.no_errprint = true;
   10493 				for (i = 1; i < 8; i++)
   10494 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10495 					    0xffffffff, i, MII_OFFSET_ANY,
   10496 					    MIIF_DOPAUSE);
   10497 				sc->phy.no_errprint = false;
   10498 
   10499 				/* Restore previous sfp cage power state */
   10500 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10501 			}
   10502 		}
   10503 	} else
   10504 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10505 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10506 
   10507 	/*
   10508 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10509 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10510 	 */
   10511 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10512 		|| (sc->sc_type == WM_T_PCH_SPT)
   10513 		|| (sc->sc_type == WM_T_PCH_CNP))
   10514 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10515 		wm_set_mdio_slow_mode_hv(sc);
   10516 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10517 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10518 	}
   10519 
   10520 	/*
   10521 	 * (For ICH8 variants)
   10522 	 * If PHY detection failed, use BM's r/w function and retry.
   10523 	 */
   10524 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10525 		/* if failed, retry with *_bm_* */
   10526 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10527 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10528 		    sc->sc_phytype);
   10529 		sc->sc_phytype = WMPHY_BM;
   10530 		mii->mii_readreg = wm_gmii_bm_readreg;
   10531 		mii->mii_writereg = wm_gmii_bm_writereg;
   10532 
   10533 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10534 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10535 	}
   10536 
   10537 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10538 		/* No PHY was found */
   10539 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10540 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10541 		sc->sc_phytype = WMPHY_NONE;
   10542 	} else {
   10543 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10544 
   10545 		/*
   10546 		 * PHY Found! Check PHY type again by the second call of
   10547 		 * wm_gmii_setup_phytype.
   10548 		 */
   10549 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10550 		    child->mii_mpd_model);
   10551 
   10552 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10553 	}
   10554 }
   10555 
   10556 /*
   10557  * wm_gmii_mediachange:	[ifmedia interface function]
   10558  *
   10559  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10560  */
   10561 static int
   10562 wm_gmii_mediachange(struct ifnet *ifp)
   10563 {
   10564 	struct wm_softc *sc = ifp->if_softc;
   10565 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10566 	uint32_t reg;
   10567 	int rc;
   10568 
   10569 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10570 		device_xname(sc->sc_dev), __func__));
   10571 	if ((ifp->if_flags & IFF_UP) == 0)
   10572 		return 0;
   10573 
   10574 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10575 	if ((sc->sc_type == WM_T_82580)
   10576 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10577 	    || (sc->sc_type == WM_T_I211)) {
   10578 		reg = CSR_READ(sc, WMREG_PHPM);
   10579 		reg &= ~PHPM_GO_LINK_D;
   10580 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10581 	}
   10582 
   10583 	/* Disable D0 LPLU. */
   10584 	wm_lplu_d0_disable(sc);
   10585 
   10586 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10587 	sc->sc_ctrl |= CTRL_SLU;
   10588 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10589 	    || (sc->sc_type > WM_T_82543)) {
   10590 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10591 	} else {
   10592 		sc->sc_ctrl &= ~CTRL_ASDE;
   10593 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10594 		if (ife->ifm_media & IFM_FDX)
   10595 			sc->sc_ctrl |= CTRL_FD;
   10596 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10597 		case IFM_10_T:
   10598 			sc->sc_ctrl |= CTRL_SPEED_10;
   10599 			break;
   10600 		case IFM_100_TX:
   10601 			sc->sc_ctrl |= CTRL_SPEED_100;
   10602 			break;
   10603 		case IFM_1000_T:
   10604 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10605 			break;
   10606 		case IFM_NONE:
   10607 			/* There is no specific setting for IFM_NONE */
   10608 			break;
   10609 		default:
   10610 			panic("wm_gmii_mediachange: bad media 0x%x",
   10611 			    ife->ifm_media);
   10612 		}
   10613 	}
   10614 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10615 	CSR_WRITE_FLUSH(sc);
   10616 
   10617 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10618 		wm_serdes_mediachange(ifp);
   10619 
   10620 	if (sc->sc_type <= WM_T_82543)
   10621 		wm_gmii_reset(sc);
   10622 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10623 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10624 		/* Allow time for the SFP cage to power up the PHY */
   10625 		delay(300 * 1000);
   10626 		wm_gmii_reset(sc);
   10627 	}
   10628 
   10629 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10630 		return 0;
   10631 	return rc;
   10632 }
   10633 
   10634 /*
   10635  * wm_gmii_mediastatus:	[ifmedia interface function]
   10636  *
   10637  *	Get the current interface media status on a 1000BASE-T device.
   10638  */
   10639 static void
   10640 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10641 {
   10642 	struct wm_softc *sc = ifp->if_softc;
   10643 
   10644 	ether_mediastatus(ifp, ifmr);
   10645 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10646 	    | sc->sc_flowflags;
   10647 }
   10648 
   10649 #define	MDI_IO		CTRL_SWDPIN(2)
   10650 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10651 #define	MDI_CLK		CTRL_SWDPIN(3)
   10652 
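/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang the given value, MSB first, out the 82543's
 *	software-controlled MDIO pin, toggling MDI_CLK for each bit.
 */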
   10653 static void
   10654 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10655 {
   10656 	uint32_t i, v;
   10657 
   10658 	v = CSR_READ(sc, WMREG_CTRL);
   10659 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10660 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10661 
   10662 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10663 		if (data & i)
   10664 			v |= MDI_IO;
   10665 		else
   10666 			v &= ~MDI_IO;
   10667 		CSR_WRITE(sc, WMREG_CTRL, v);
   10668 		CSR_WRITE_FLUSH(sc);
   10669 		delay(10);
   10670 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10671 		CSR_WRITE_FLUSH(sc);
   10672 		delay(10);
   10673 		CSR_WRITE(sc, WMREG_CTRL, v);
   10674 		CSR_WRITE_FLUSH(sc);
   10675 		delay(10);
   10676 	}
   10677 }
   10678 
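/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits of data, MSB first, in from the 82543's
 *	software-controlled MDIO pin.
 */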
   10679 static uint16_t
   10680 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10681 {
   10682 	uint32_t v, i;
   10683 	uint16_t data = 0;
   10684 
   10685 	v = CSR_READ(sc, WMREG_CTRL);
   10686 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10687 	v |= CTRL_SWDPIO(3);
   10688 
   10689 	CSR_WRITE(sc, WMREG_CTRL, v);
   10690 	CSR_WRITE_FLUSH(sc);
   10691 	delay(10);
   10692 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10693 	CSR_WRITE_FLUSH(sc);
   10694 	delay(10);
   10695 	CSR_WRITE(sc, WMREG_CTRL, v);
   10696 	CSR_WRITE_FLUSH(sc);
   10697 	delay(10);
   10698 
   10699 	for (i = 0; i < 16; i++) {
   10700 		data <<= 1;
   10701 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10702 		CSR_WRITE_FLUSH(sc);
   10703 		delay(10);
   10704 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10705 			data |= 1;
   10706 		CSR_WRITE(sc, WMREG_CTRL, v);
   10707 		CSR_WRITE_FLUSH(sc);
   10708 		delay(10);
   10709 	}
   10710 
   10711 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10712 	CSR_WRITE_FLUSH(sc);
   10713 	delay(10);
   10714 	CSR_WRITE(sc, WMREG_CTRL, v);
   10715 	CSR_WRITE_FLUSH(sc);
   10716 	delay(10);
   10717 
   10718 	return data;
   10719 }
   10720 
   10721 #undef MDI_IO
   10722 #undef MDI_DIR
   10723 #undef MDI_CLK
   10724 
   10725 /*
   10726  * wm_gmii_i82543_readreg:	[mii interface function]
   10727  *
   10728  *	Read a PHY register on the GMII (i82543 version).
   10729  */
   10730 static int
   10731 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10732 {
   10733 	struct wm_softc *sc = device_private(dev);
   10734 
   10735 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10736 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10737 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10738 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10739 
   10740 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10741 		device_xname(dev), phy, reg, *val));
   10742 
   10743 	return 0;
   10744 }
   10745 
   10746 /*
   10747  * wm_gmii_i82543_writereg:	[mii interface function]
   10748  *
   10749  *	Write a PHY register on the GMII (i82543 version).
   10750  */
   10751 static int
   10752 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10753 {
   10754 	struct wm_softc *sc = device_private(dev);
   10755 
   10756 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10757 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10758 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10759 	    (MII_COMMAND_START << 30), 32);
   10760 
   10761 	return 0;
   10762 }
   10763 
   10764 /*
   10765  * wm_gmii_mdic_readreg:	[mii interface function]
   10766  *
   10767  *	Read a PHY register on the GMII.
   10768  */
   10769 static int
   10770 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10771 {
   10772 	struct wm_softc *sc = device_private(dev);
   10773 	uint32_t mdic = 0;
   10774 	int i;
   10775 
   10776 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10777 	    && (reg > MII_ADDRMASK)) {
   10778 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10779 		    __func__, sc->sc_phytype, reg);
   10780 		reg &= MII_ADDRMASK;
   10781 	}
   10782 
   10783 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10784 	    MDIC_REGADD(reg));
   10785 
   10786 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10787 		delay(50);
   10788 		mdic = CSR_READ(sc, WMREG_MDIC);
   10789 		if (mdic & MDIC_READY)
   10790 			break;
   10791 	}
   10792 
   10793 	if ((mdic & MDIC_READY) == 0) {
   10794 		DPRINTF(WM_DEBUG_GMII,
   10795 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10796 			device_xname(dev), phy, reg));
   10797 		return ETIMEDOUT;
   10798 	} else if (mdic & MDIC_E) {
   10799 		/* This is normal if no PHY is present. */
   10800 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10801 			device_xname(sc->sc_dev), phy, reg));
   10802 		return -1;
   10803 	} else
   10804 		*val = MDIC_DATA(mdic);
   10805 
   10806 	/*
   10807 	 * Allow some time after each MDIC transaction to avoid
   10808 	 * reading duplicate data in the next MDIC transaction.
   10809 	 */
   10810 	if (sc->sc_type == WM_T_PCH2)
   10811 		delay(100);
   10812 
   10813 	return 0;
   10814 }
   10815 
   10816 /*
   10817  * wm_gmii_mdic_writereg:	[mii interface function]
   10818  *
   10819  *	Write a PHY register on the GMII.
   10820  */
   10821 static int
   10822 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10823 {
   10824 	struct wm_softc *sc = device_private(dev);
   10825 	uint32_t mdic = 0;
   10826 	int i;
   10827 
   10828 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10829 	    && (reg > MII_ADDRMASK)) {
   10830 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10831 		    __func__, sc->sc_phytype, reg);
   10832 		reg &= MII_ADDRMASK;
   10833 	}
   10834 
   10835 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10836 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10837 
   10838 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10839 		delay(50);
   10840 		mdic = CSR_READ(sc, WMREG_MDIC);
   10841 		if (mdic & MDIC_READY)
   10842 			break;
   10843 	}
   10844 
   10845 	if ((mdic & MDIC_READY) == 0) {
   10846 		DPRINTF(WM_DEBUG_GMII,
   10847 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10848 			device_xname(dev), phy, reg));
   10849 		return ETIMEDOUT;
   10850 	} else if (mdic & MDIC_E) {
   10851 		DPRINTF(WM_DEBUG_GMII,
   10852 		    ("%s: MDIC write error: phy %d reg %d\n",
   10853 			device_xname(dev), phy, reg));
   10854 		return -1;
   10855 	}
   10856 
   10857 	/*
   10858 	 * Allow some time after each MDIC transaction to avoid
   10859 	 * reading duplicate data in the next MDIC transaction.
   10860 	 */
   10861 	if (sc->sc_type == WM_T_PCH2)
   10862 		delay(100);
   10863 
   10864 	return 0;
   10865 }
   10866 
   10867 /*
   10868  * wm_gmii_i82544_readreg:	[mii interface function]
   10869  *
   10870  *	Read a PHY register on the GMII.
   10871  */
   10872 static int
   10873 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10874 {
   10875 	struct wm_softc *sc = device_private(dev);
   10876 	int rv;
   10877 
   10878 	if (sc->phy.acquire(sc)) {
   10879 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10880 		return -1;
   10881 	}
   10882 
   10883 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10884 
   10885 	sc->phy.release(sc);
   10886 
   10887 	return rv;
   10888 }
   10889 
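/*
 * wm_gmii_i82544_readreg_locked:
 *
 *	Like wm_gmii_i82544_readreg(), but with the PHY semaphore already
 *	held; writes the IGP page select register first when needed.
 */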
   10890 static int
   10891 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10892 {
   10893 	struct wm_softc *sc = device_private(dev);
   10894 	int rv;
   10895 
   10896 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10897 		switch (sc->sc_phytype) {
   10898 		case WMPHY_IGP:
   10899 		case WMPHY_IGP_2:
   10900 		case WMPHY_IGP_3:
   10901 			rv = wm_gmii_mdic_writereg(dev, phy,
   10902 			    MII_IGPHY_PAGE_SELECT, reg);
   10903 			if (rv != 0)
   10904 				return rv;
   10905 			break;
   10906 		default:
   10907 #ifdef WM_DEBUG
   10908 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10909 			    __func__, sc->sc_phytype, reg);
   10910 #endif
   10911 			break;
   10912 		}
   10913 	}
   10914 
   10915 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10916 }
   10917 
   10918 /*
   10919  * wm_gmii_i82544_writereg:	[mii interface function]
   10920  *
   10921  *	Write a PHY register on the GMII.
   10922  */
   10923 static int
   10924 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10925 {
   10926 	struct wm_softc *sc = device_private(dev);
   10927 	int rv;
   10928 
   10929 	if (sc->phy.acquire(sc)) {
   10930 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10931 		return -1;
   10932 	}
   10933 
   10934 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10935 	sc->phy.release(sc);
   10936 
   10937 	return rv;
   10938 }
   10939 
   10940 static int
   10941 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10942 {
   10943 	struct wm_softc *sc = device_private(dev);
   10944 	int rv;
   10945 
   10946 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10947 		switch (sc->sc_phytype) {
   10948 		case WMPHY_IGP:
   10949 		case WMPHY_IGP_2:
   10950 		case WMPHY_IGP_3:
   10951 			rv = wm_gmii_mdic_writereg(dev, phy,
   10952 			    MII_IGPHY_PAGE_SELECT, reg);
   10953 			if (rv != 0)
   10954 				return rv;
   10955 			break;
   10956 		default:
   10957 #ifdef WM_DEBUG
   10958 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10959 			    __func__, sc->sc_phytype, reg);
   10960 #endif
   10961 			break;
   10962 		}
   10963 	}
   10964 
   10965 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10966 }
   10967 
   10968 /*
   10969  * wm_gmii_i80003_readreg:	[mii interface function]
   10970  *
    10971  *	Read a PHY register on the Kumeran bus.
    10972  * This could be handled by the PHY layer if we didn't have to lock the
    10973  * resource ...
   10974  */
   10975 static int
   10976 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10977 {
   10978 	struct wm_softc *sc = device_private(dev);
   10979 	int page_select;
   10980 	uint16_t temp, temp2;
   10981 	int rv = 0;
   10982 
   10983 	if (phy != 1) /* Only one PHY on kumeran bus */
   10984 		return -1;
   10985 
   10986 	if (sc->phy.acquire(sc)) {
   10987 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10988 		return -1;
   10989 	}
   10990 
   10991 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10992 		page_select = GG82563_PHY_PAGE_SELECT;
   10993 	else {
   10994 		/*
   10995 		 * Use Alternative Page Select register to access registers
   10996 		 * 30 and 31.
   10997 		 */
   10998 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10999 	}
   11000 	temp = reg >> GG82563_PAGE_SHIFT;
   11001 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11002 		goto out;
   11003 
   11004 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11005 		/*
    11006 		 * Wait another 200us to work around a bug in the ready
    11007 		 * bit of the MDIC register.
   11008 		 */
   11009 		delay(200);
   11010 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11011 		if ((rv != 0) || (temp2 != temp)) {
   11012 			device_printf(dev, "%s failed\n", __func__);
   11013 			rv = -1;
   11014 			goto out;
   11015 		}
   11016 		delay(200);
   11017 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11018 		delay(200);
   11019 	} else
   11020 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11021 
   11022 out:
   11023 	sc->phy.release(sc);
   11024 	return rv;
   11025 }
   11026 
   11027 /*
   11028  * wm_gmii_i80003_writereg:	[mii interface function]
   11029  *
    11030  *	Write a PHY register on the Kumeran bus.
    11031  * This could be handled by the PHY layer if we didn't have to lock the
    11032  * resource ...
   11033  */
   11034 static int
   11035 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11036 {
   11037 	struct wm_softc *sc = device_private(dev);
   11038 	int page_select, rv;
   11039 	uint16_t temp, temp2;
   11040 
   11041 	if (phy != 1) /* Only one PHY on kumeran bus */
   11042 		return -1;
   11043 
   11044 	if (sc->phy.acquire(sc)) {
   11045 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11046 		return -1;
   11047 	}
   11048 
   11049 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11050 		page_select = GG82563_PHY_PAGE_SELECT;
   11051 	else {
   11052 		/*
   11053 		 * Use Alternative Page Select register to access registers
   11054 		 * 30 and 31.
   11055 		 */
   11056 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11057 	}
   11058 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11059 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11060 		goto out;
   11061 
   11062 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11063 		/*
    11064 		 * Wait another 200us to work around a bug in the ready
    11065 		 * bit of the MDIC register.
   11066 		 */
   11067 		delay(200);
   11068 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11069 		if ((rv != 0) || (temp2 != temp)) {
   11070 			device_printf(dev, "%s failed\n", __func__);
   11071 			rv = -1;
   11072 			goto out;
   11073 		}
   11074 		delay(200);
   11075 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11076 		delay(200);
   11077 	} else
   11078 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11079 
   11080 out:
   11081 	sc->phy.release(sc);
   11082 	return rv;
   11083 }
   11084 
   11085 /*
   11086  * wm_gmii_bm_readreg:	[mii interface function]
   11087  *
    11088  *	Read a PHY register on the BM PHY.
    11089  * This could be handled by the PHY layer if we didn't have to lock the
    11090  * resource ...
   11091  */
   11092 static int
   11093 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11094 {
   11095 	struct wm_softc *sc = device_private(dev);
   11096 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11097 	int rv;
   11098 
   11099 	if (sc->phy.acquire(sc)) {
   11100 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11101 		return -1;
   11102 	}
   11103 
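	/*
	 * On parts other than the 82574/82583, the page select, port
	 * control and wakeup registers (pages >= 768, page 0 register
	 * 25, and register 31) are only accessible at PHY address 1.
	 */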
   11104 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11105 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11106 		    || (reg == 31)) ? 1 : phy;
   11107 	/* Page 800 works differently than the rest so it has its own func */
   11108 	if (page == BM_WUC_PAGE) {
   11109 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11110 		goto release;
   11111 	}
   11112 
   11113 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11114 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11115 		    && (sc->sc_type != WM_T_82583))
   11116 			rv = wm_gmii_mdic_writereg(dev, phy,
   11117 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11118 		else
   11119 			rv = wm_gmii_mdic_writereg(dev, phy,
   11120 			    BME1000_PHY_PAGE_SELECT, page);
   11121 		if (rv != 0)
   11122 			goto release;
   11123 	}
   11124 
   11125 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11126 
   11127 release:
   11128 	sc->phy.release(sc);
   11129 	return rv;
   11130 }
   11131 
   11132 /*
   11133  * wm_gmii_bm_writereg:	[mii interface function]
   11134  *
    11135  *	Write a PHY register on the BM PHY.
    11136  * This could be handled by the PHY layer if we didn't have to lock the
    11137  * resource ...
   11138  */
   11139 static int
   11140 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11141 {
   11142 	struct wm_softc *sc = device_private(dev);
   11143 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11144 	int rv;
   11145 
   11146 	if (sc->phy.acquire(sc)) {
   11147 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11148 		return -1;
   11149 	}
   11150 
   11151 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11152 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11153 		    || (reg == 31)) ? 1 : phy;
   11154 	/* Page 800 works differently than the rest so it has its own func */
   11155 	if (page == BM_WUC_PAGE) {
   11156 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11157 		goto release;
   11158 	}
   11159 
   11160 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11161 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11162 		    && (sc->sc_type != WM_T_82583))
   11163 			rv = wm_gmii_mdic_writereg(dev, phy,
   11164 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11165 		else
   11166 			rv = wm_gmii_mdic_writereg(dev, phy,
   11167 			    BME1000_PHY_PAGE_SELECT, page);
   11168 		if (rv != 0)
   11169 			goto release;
   11170 	}
   11171 
   11172 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11173 
   11174 release:
   11175 	sc->phy.release(sc);
   11176 	return rv;
   11177 }
   11178 
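/*
 * Illustrative sketch (not part of the driver): for the BM accessors
 * above, the page lives in the bits of "reg" above BME1000_PAGE_SHIFT.
 * Accesses to BM_WUC_PAGE (800) are routed through
 * wm_access_phy_wakeup_reg_bm() automatically; everything else goes
 * through the page select register.  The page/register pair below is
 * made up for the example.
 */
#if 0
static int
wm_bm_example(device_t dev, uint16_t *valp)
{
	/* Hypothetical access: register 17 on page 769. */
	int reg = (769 << BME1000_PAGE_SHIFT) | 17;

	return wm_gmii_bm_readreg(dev, 1, reg, valp);
}
#endif
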
   11179 /*
   11180  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11181  *  @dev: pointer to the HW structure
   11182  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11183  *
   11184  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11185  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11186  */
   11187 static int
   11188 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11189 {
   11190 	uint16_t temp;
   11191 	int rv;
   11192 
   11193 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11194 		device_xname(dev), __func__));
   11195 
   11196 	if (!phy_regp)
   11197 		return -1;
   11198 
   11199 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11200 
   11201 	/* Select Port Control Registers page */
   11202 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11203 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11204 	if (rv != 0)
   11205 		return rv;
   11206 
   11207 	/* Read WUCE and save it */
   11208 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11209 	if (rv != 0)
   11210 		return rv;
   11211 
   11212 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11213 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11214 	 */
   11215 	temp = *phy_regp;
   11216 	temp |= BM_WUC_ENABLE_BIT;
   11217 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11218 
   11219 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11220 		return rv;
   11221 
   11222 	/* Select Host Wakeup Registers page - caller now able to write
   11223 	 * registers on the Wakeup registers page
   11224 	 */
   11225 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11226 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11227 }
   11228 
   11229 /*
   11230  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11231  *  @dev: pointer to the HW structure
   11232  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11233  *
   11234  *  Restore BM_WUC_ENABLE_REG to its original value.
   11235  *
   11236  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11237  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11238  *  caller.
   11239  */
   11240 static int
   11241 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11242 {
   11243 
   11244 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11245 		device_xname(dev), __func__));
   11246 
   11247 	if (!phy_regp)
   11248 		return -1;
   11249 
   11250 	/* Select Port Control Registers page */
   11251 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11252 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11253 
   11254 	/* Restore 769.17 to its original value */
   11255 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11256 
   11257 	return 0;
   11258 }
   11259 
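/*
 * Illustrative sketch (not part of the driver): a caller doing several
 * wakeup register accesses in a row can bracket them itself and pass
 * page_set == true to wm_access_phy_wakeup_reg_bm() below, instead of
 * paying for the enable/disable dance on every access.  The register
 * offsets (1 and 2) are made up; the semaphore is assumed to be held.
 */
#if 0
static int
wm_wuc_batch_example(device_t dev, uint16_t *a, uint16_t *b)
{
	uint16_t wuce;
	int rv;

	if ((rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce)) != 0)
		return rv;
	/* The page is now BM_WUC_PAGE; read two hypothetical registers. */
	rv = wm_access_phy_wakeup_reg_bm(dev, 1, a, true, true);
	if (rv == 0)
		rv = wm_access_phy_wakeup_reg_bm(dev, 2, b, true, true);
	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
	return rv;
}
#endif
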
   11260 /*
   11261  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    11262  *  @dev: pointer to the device
   11263  *  @offset: register offset to be read or written
   11264  *  @val: pointer to the data to read or write
   11265  *  @rd: determines if operation is read or write
   11266  *  @page_set: BM_WUC_PAGE already set and access enabled
   11267  *
   11268  *  Read the PHY register at offset and store the retrieved information in
   11269  *  data, or write data to PHY register at offset.  Note the procedure to
   11270  *  access the PHY wakeup registers is different than reading the other PHY
   11271  *  registers. It works as such:
   11272  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11273  *  2) Set page to 800 for host (801 if we were the manageability engine)
   11274  *  3) Write the address using the address opcode (0x11)
   11275  *  4) Read or write the data using the data opcode (0x12)
   11276  *  5) Restore 769.17.2 to its original value
   11277  *
   11278  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11279  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11280  *
   11281  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11282  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    11283  *  is responsible for calls to wm_{enable,disable}_phy_wakeup_reg_access_bm()).
   11284  */
   11285 static int
    11286 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11287 	bool page_set)
   11288 {
   11289 	struct wm_softc *sc = device_private(dev);
   11290 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11291 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11292 	uint16_t wuce;
   11293 	int rv = 0;
   11294 
   11295 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11296 		device_xname(dev), __func__));
   11297 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11298 	if ((sc->sc_type == WM_T_PCH)
   11299 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11300 		device_printf(dev,
   11301 		    "Attempting to access page %d while gig enabled.\n", page);
   11302 	}
   11303 
   11304 	if (!page_set) {
   11305 		/* Enable access to PHY wakeup registers */
   11306 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11307 		if (rv != 0) {
   11308 			device_printf(dev,
   11309 			    "%s: Could not enable PHY wakeup reg access\n",
   11310 			    __func__);
   11311 			return rv;
   11312 		}
   11313 	}
   11314 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11315 		device_xname(sc->sc_dev), __func__, page, regnum));
   11316 
   11317 	/*
    11318 	 * Steps 3 and 4: Access the PHY wakeup register, as described
    11319 	 * in the function comment above.
   11320 	 */
   11321 
   11322 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11323 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11324 	if (rv != 0)
   11325 		return rv;
   11326 
   11327 	if (rd) {
   11328 		/* Read the Wakeup register page value using opcode 0x12 */
   11329 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11330 	} else {
   11331 		/* Write the Wakeup register page value using opcode 0x12 */
   11332 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11333 	}
   11334 	if (rv != 0)
   11335 		return rv;
   11336 
   11337 	if (!page_set)
   11338 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11339 
   11340 	return rv;
   11341 }
   11342 
   11343 /*
   11344  * wm_gmii_hv_readreg:	[mii interface function]
   11345  *
    11346  *	Read a PHY register on the HV (PCH and newer) PHY.
    11347  * This could be handled by the PHY layer if we didn't have to lock the
    11348  * resource ...
   11349  */
   11350 static int
   11351 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11352 {
   11353 	struct wm_softc *sc = device_private(dev);
   11354 	int rv;
   11355 
   11356 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11357 		device_xname(dev), __func__));
   11358 	if (sc->phy.acquire(sc)) {
   11359 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11360 		return -1;
   11361 	}
   11362 
   11363 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11364 	sc->phy.release(sc);
   11365 	return rv;
   11366 }
   11367 
   11368 static int
   11369 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11370 {
   11371 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11372 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11373 	int rv;
   11374 
   11375 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11376 
   11377 	/* Page 800 works differently than the rest so it has its own func */
   11378 	if (page == BM_WUC_PAGE)
   11379 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11380 
   11381 	/*
    11382 	 * Pages lower than 768 work differently than the rest and are
    11383 	 * not handled here.
    11384 	 */
    11385 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11386 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11387 		return -1;
   11388 	}
   11389 
   11390 	/*
   11391 	 * XXX I21[789] documents say that the SMBus Address register is at
   11392 	 * PHY address 01, Page 0 (not 768), Register 26.
   11393 	 */
   11394 	if (page == HV_INTC_FC_PAGE_START)
   11395 		page = 0;
   11396 
   11397 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11398 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11399 		    page << BME1000_PAGE_SHIFT);
   11400 		if (rv != 0)
   11401 			return rv;
   11402 	}
   11403 
   11404 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11405 }
   11406 
   11407 /*
   11408  * wm_gmii_hv_writereg:	[mii interface function]
   11409  *
    11410  *	Write a PHY register on the HV (PCH and newer) PHY.
    11411  * This could be handled by the PHY layer if we didn't have to lock the
    11412  * resource ...
   11413  */
   11414 static int
   11415 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11416 {
   11417 	struct wm_softc *sc = device_private(dev);
   11418 	int rv;
   11419 
   11420 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11421 		device_xname(dev), __func__));
   11422 
   11423 	if (sc->phy.acquire(sc)) {
   11424 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11425 		return -1;
   11426 	}
   11427 
   11428 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11429 	sc->phy.release(sc);
   11430 
   11431 	return rv;
   11432 }
   11433 
   11434 static int
   11435 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11436 {
   11437 	struct wm_softc *sc = device_private(dev);
   11438 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11439 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11440 	int rv;
   11441 
   11442 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11443 
   11444 	/* Page 800 works differently than the rest so it has its own func */
   11445 	if (page == BM_WUC_PAGE)
   11446 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11447 		    false);
   11448 
   11449 	/*
    11450 	 * Pages lower than 768 work differently than the rest and are
    11451 	 * not handled here.
    11452 	 */
    11453 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11454 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11455 		return -1;
   11456 	}
   11457 
   11458 	{
   11459 		/*
   11460 		 * XXX I21[789] documents say that the SMBus Address register
   11461 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11462 		 */
   11463 		if (page == HV_INTC_FC_PAGE_START)
   11464 			page = 0;
   11465 
   11466 		/*
    11467 		 * XXX Work around MDIO accesses being disabled after entering
   11468 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11469 		 * register is set)
   11470 		 */
   11471 		if (sc->sc_phytype == WMPHY_82578) {
   11472 			struct mii_softc *child;
   11473 
   11474 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11475 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11476 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11477 			    && ((val & (1 << 11)) != 0)) {
   11478 				device_printf(dev, "XXX need workaround\n");
   11479 			}
   11480 		}
   11481 
   11482 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11483 			rv = wm_gmii_mdic_writereg(dev, 1,
   11484 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11485 			if (rv != 0)
   11486 				return rv;
   11487 		}
   11488 	}
   11489 
   11490 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11491 }
   11492 
   11493 /*
   11494  * wm_gmii_82580_readreg:	[mii interface function]
   11495  *
   11496  *	Read a PHY register on the 82580 and I350.
   11497  * This could be handled by the PHY layer if we didn't have to lock the
    11498  * resource ...
   11499  */
   11500 static int
   11501 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11502 {
   11503 	struct wm_softc *sc = device_private(dev);
   11504 	int rv;
   11505 
   11506 	if (sc->phy.acquire(sc) != 0) {
   11507 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11508 		return -1;
   11509 	}
   11510 
   11511 #ifdef DIAGNOSTIC
   11512 	if (reg > MII_ADDRMASK) {
   11513 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11514 		    __func__, sc->sc_phytype, reg);
   11515 		reg &= MII_ADDRMASK;
   11516 	}
   11517 #endif
   11518 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11519 
   11520 	sc->phy.release(sc);
   11521 	return rv;
   11522 }
   11523 
   11524 /*
   11525  * wm_gmii_82580_writereg:	[mii interface function]
   11526  *
   11527  *	Write a PHY register on the 82580 and I350.
   11528  * This could be handled by the PHY layer if we didn't have to lock the
    11529  * resource ...
   11530  */
   11531 static int
   11532 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11533 {
   11534 	struct wm_softc *sc = device_private(dev);
   11535 	int rv;
   11536 
   11537 	if (sc->phy.acquire(sc) != 0) {
   11538 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11539 		return -1;
   11540 	}
   11541 
   11542 #ifdef DIAGNOSTIC
   11543 	if (reg > MII_ADDRMASK) {
   11544 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11545 		    __func__, sc->sc_phytype, reg);
   11546 		reg &= MII_ADDRMASK;
   11547 	}
   11548 #endif
   11549 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11550 
   11551 	sc->phy.release(sc);
   11552 	return rv;
   11553 }
   11554 
   11555 /*
   11556  * wm_gmii_gs40g_readreg:	[mii interface function]
   11557  *
    11558  *	Read a PHY register on the I210 and I211.
    11559  * This could be handled by the PHY layer if we didn't have to lock the
    11560  * resource ...
   11561  */
   11562 static int
   11563 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11564 {
   11565 	struct wm_softc *sc = device_private(dev);
   11566 	int page, offset;
   11567 	int rv;
   11568 
   11569 	/* Acquire semaphore */
   11570 	if (sc->phy.acquire(sc)) {
   11571 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11572 		return -1;
   11573 	}
   11574 
   11575 	/* Page select */
   11576 	page = reg >> GS40G_PAGE_SHIFT;
   11577 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11578 	if (rv != 0)
   11579 		goto release;
   11580 
   11581 	/* Read reg */
   11582 	offset = reg & GS40G_OFFSET_MASK;
   11583 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11584 
   11585 release:
   11586 	sc->phy.release(sc);
   11587 	return rv;
   11588 }
   11589 
   11590 /*
   11591  * wm_gmii_gs40g_writereg:	[mii interface function]
   11592  *
   11593  *	Write a PHY register on the I210 and I211.
   11594  * This could be handled by the PHY layer if we didn't have to lock the
    11595  * resource ...
   11596  */
   11597 static int
   11598 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11599 {
   11600 	struct wm_softc *sc = device_private(dev);
   11601 	uint16_t page;
   11602 	int offset, rv;
   11603 
   11604 	/* Acquire semaphore */
   11605 	if (sc->phy.acquire(sc)) {
   11606 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11607 		return -1;
   11608 	}
   11609 
   11610 	/* Page select */
   11611 	page = reg >> GS40G_PAGE_SHIFT;
   11612 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11613 	if (rv != 0)
   11614 		goto release;
   11615 
   11616 	/* Write reg */
   11617 	offset = reg & GS40G_OFFSET_MASK;
   11618 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11619 
   11620 release:
   11621 	/* Release semaphore */
   11622 	sc->phy.release(sc);
   11623 	return rv;
   11624 }
   11625 
   11626 /*
   11627  * wm_gmii_statchg:	[mii interface function]
   11628  *
   11629  *	Callback from MII layer when media changes.
   11630  */
   11631 static void
   11632 wm_gmii_statchg(struct ifnet *ifp)
   11633 {
   11634 	struct wm_softc *sc = ifp->if_softc;
   11635 	struct mii_data *mii = &sc->sc_mii;
   11636 
   11637 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11638 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11639 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11640 
   11641 	/* Get flow control negotiation result. */
   11642 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11643 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11644 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11645 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11646 	}
   11647 
   11648 	if (sc->sc_flowflags & IFM_FLOW) {
   11649 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11650 			sc->sc_ctrl |= CTRL_TFCE;
   11651 			sc->sc_fcrtl |= FCRTL_XONE;
   11652 		}
   11653 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11654 			sc->sc_ctrl |= CTRL_RFCE;
   11655 	}
   11656 
   11657 	if (mii->mii_media_active & IFM_FDX) {
   11658 		DPRINTF(WM_DEBUG_LINK,
   11659 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11660 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11661 	} else {
   11662 		DPRINTF(WM_DEBUG_LINK,
   11663 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11664 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11665 	}
   11666 
   11667 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11668 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11669 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11670 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11671 	if (sc->sc_type == WM_T_80003) {
   11672 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11673 		case IFM_1000_T:
   11674 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11675 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11676 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11677 			break;
   11678 		default:
   11679 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11680 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11681 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11682 			break;
   11683 		}
   11684 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11685 	}
   11686 }
   11687 
   11688 /* kumeran related (80003, ICH* and PCH*) */
   11689 
   11690 /*
   11691  * wm_kmrn_readreg:
   11692  *
   11693  *	Read a kumeran register
   11694  */
   11695 static int
   11696 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11697 {
   11698 	int rv;
   11699 
   11700 	if (sc->sc_type == WM_T_80003)
   11701 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11702 	else
   11703 		rv = sc->phy.acquire(sc);
   11704 	if (rv != 0) {
   11705 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11706 		    __func__);
   11707 		return rv;
   11708 	}
   11709 
   11710 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11711 
   11712 	if (sc->sc_type == WM_T_80003)
   11713 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11714 	else
   11715 		sc->phy.release(sc);
   11716 
   11717 	return rv;
   11718 }
   11719 
   11720 static int
   11721 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11722 {
   11723 
   11724 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11725 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11726 	    KUMCTRLSTA_REN);
   11727 	CSR_WRITE_FLUSH(sc);
   11728 	delay(2);
   11729 
   11730 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11731 
   11732 	return 0;
   11733 }
   11734 
   11735 /*
   11736  * wm_kmrn_writereg:
   11737  *
   11738  *	Write a kumeran register
   11739  */
   11740 static int
   11741 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11742 {
   11743 	int rv;
   11744 
   11745 	if (sc->sc_type == WM_T_80003)
   11746 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11747 	else
   11748 		rv = sc->phy.acquire(sc);
   11749 	if (rv != 0) {
   11750 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11751 		    __func__);
   11752 		return rv;
   11753 	}
   11754 
   11755 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11756 
   11757 	if (sc->sc_type == WM_T_80003)
   11758 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11759 	else
   11760 		sc->phy.release(sc);
   11761 
   11762 	return rv;
   11763 }
   11764 
   11765 static int
   11766 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11767 {
   11768 
   11769 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11770 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11771 
   11772 	return 0;
   11773 }
   11774 
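/*
 * Illustrative sketch (not part of the driver): a read-modify-write of
 * a kumeran register using the accessors above, which take and release
 * the appropriate semaphore themselves.  The bit being set (0x0004) is
 * a made-up placeholder, not a documented field.
 */
#if 0
static int
wm_kmrn_rmw_example(struct wm_softc *sc)
{
	uint16_t val;
	int rv;

	rv = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &val);
	if (rv != 0)
		return rv;
	val |= 0x0004;	/* hypothetical bit, for illustration only */
	return wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, val);
}
#endif
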
   11775 /*
   11776  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11777  * This access method is different from IEEE MMD.
   11778  */
   11779 static int
   11780 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11781 {
   11782 	struct wm_softc *sc = device_private(dev);
   11783 	int rv;
   11784 
   11785 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11786 	if (rv != 0)
   11787 		return rv;
   11788 
   11789 	if (rd)
   11790 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11791 	else
   11792 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11793 	return rv;
   11794 }
   11795 
   11796 static int
   11797 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11798 {
   11799 
   11800 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11801 }
   11802 
   11803 static int
   11804 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11805 {
   11806 
   11807 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11808 }
   11809 
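/*
 * Illustrative sketch (not part of the driver): the EMI helpers above
 * are *_locked functions, so a caller must hold the PHY semaphore
 * around them.  The EMI offset used here (0x8000) is a made-up
 * placeholder, not a real register.
 */
#if 0
static int
wm_emi_read_example(device_t dev, uint16_t *valp)
{
	struct wm_softc *sc = device_private(dev);
	int rv;

	if (sc->phy.acquire(sc) != 0)
		return -1;
	rv = wm_read_emi_reg_locked(dev, 0x8000, valp);
	sc->phy.release(sc);
	return rv;
}
#endif
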
   11810 /* SGMII related */
   11811 
   11812 /*
   11813  * wm_sgmii_uses_mdio
   11814  *
   11815  * Check whether the transaction is to the internal PHY or the external
   11816  * MDIO interface. Return true if it's MDIO.
   11817  */
   11818 static bool
   11819 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11820 {
   11821 	uint32_t reg;
   11822 	bool ismdio = false;
   11823 
   11824 	switch (sc->sc_type) {
   11825 	case WM_T_82575:
   11826 	case WM_T_82576:
   11827 		reg = CSR_READ(sc, WMREG_MDIC);
   11828 		ismdio = ((reg & MDIC_DEST) != 0);
   11829 		break;
   11830 	case WM_T_82580:
   11831 	case WM_T_I350:
   11832 	case WM_T_I354:
   11833 	case WM_T_I210:
   11834 	case WM_T_I211:
   11835 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11836 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11837 		break;
   11838 	default:
   11839 		break;
   11840 	}
   11841 
   11842 	return ismdio;
   11843 }
   11844 
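/*
 * Illustrative sketch (not part of the driver): a caller could use the
 * helper above to dispatch between the MDIC accessor and the SGMII/I2C
 * accessor defined below.  The wrapper function itself is hypothetical.
 */
#if 0
static int
wm_sgmii_dispatch_example(struct wm_softc *sc, int phy, int reg,
    uint16_t *valp)
{
	if (wm_sgmii_uses_mdio(sc))
		return wm_gmii_mdic_readreg(sc->sc_dev, phy, reg, valp);
	return wm_sgmii_readreg(sc->sc_dev, phy, reg, valp);
}
#endif
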
   11845 /*
   11846  * wm_sgmii_readreg:	[mii interface function]
   11847  *
    11848  *	Read a PHY register through the SGMII (I2C) interface.
    11849  * This could be handled by the PHY layer if we didn't have to lock the
    11850  * resource ...
   11851  */
   11852 static int
   11853 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11854 {
   11855 	struct wm_softc *sc = device_private(dev);
   11856 	int rv;
   11857 
   11858 	if (sc->phy.acquire(sc)) {
   11859 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11860 		return -1;
   11861 	}
   11862 
   11863 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11864 
   11865 	sc->phy.release(sc);
   11866 	return rv;
   11867 }
   11868 
   11869 static int
   11870 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11871 {
   11872 	struct wm_softc *sc = device_private(dev);
   11873 	uint32_t i2ccmd;
   11874 	int i, rv = 0;
   11875 
   11876 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11877 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11878 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11879 
   11880 	/* Poll the ready bit */
   11881 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11882 		delay(50);
   11883 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11884 		if (i2ccmd & I2CCMD_READY)
   11885 			break;
   11886 	}
   11887 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11888 		device_printf(dev, "I2CCMD Read did not complete\n");
   11889 		rv = ETIMEDOUT;
   11890 	}
   11891 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11892 		if (!sc->phy.no_errprint)
   11893 			device_printf(dev, "I2CCMD Error bit set\n");
   11894 		rv = EIO;
   11895 	}
   11896 
   11897 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11898 
   11899 	return rv;
   11900 }
   11901 
   11902 /*
   11903  * wm_sgmii_writereg:	[mii interface function]
   11904  *
    11905  *	Write a PHY register through the SGMII (I2C) interface.
    11906  * This could be handled by the PHY layer if we didn't have to lock the
    11907  * resource ...
   11908  */
   11909 static int
   11910 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11911 {
   11912 	struct wm_softc *sc = device_private(dev);
   11913 	int rv;
   11914 
   11915 	if (sc->phy.acquire(sc) != 0) {
   11916 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11917 		return -1;
   11918 	}
   11919 
   11920 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11921 
   11922 	sc->phy.release(sc);
   11923 
   11924 	return rv;
   11925 }
   11926 
   11927 static int
   11928 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11929 {
   11930 	struct wm_softc *sc = device_private(dev);
   11931 	uint32_t i2ccmd;
   11932 	uint16_t swapdata;
   11933 	int rv = 0;
   11934 	int i;
   11935 
   11936 	/* Swap the data bytes for the I2C interface */
   11937 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11938 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11939 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11940 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11941 
   11942 	/* Poll the ready bit */
   11943 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11944 		delay(50);
   11945 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11946 		if (i2ccmd & I2CCMD_READY)
   11947 			break;
   11948 	}
   11949 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11950 		device_printf(dev, "I2CCMD Write did not complete\n");
   11951 		rv = ETIMEDOUT;
   11952 	}
   11953 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11954 		device_printf(dev, "I2CCMD Error bit set\n");
   11955 		rv = EIO;
   11956 	}
   11957 
   11958 	return rv;
   11959 }
   11960 
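/*
 * Note on the byte swaps in the two functions above: the data field of
 * I2CCMD is big-endian on the wire, so a 16-bit value 0x1234 is written
 * as 0x3412, and read data is swapped back the same way.
 */
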
   11961 /* TBI related */
   11962 
   11963 static bool
   11964 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11965 {
   11966 	bool sig;
   11967 
   11968 	sig = ctrl & CTRL_SWDPIN(1);
   11969 
   11970 	/*
   11971 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11972 	 * detect a signal, 1 if they don't.
   11973 	 */
   11974 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11975 		sig = !sig;
   11976 
   11977 	return sig;
   11978 }
   11979 
   11980 /*
   11981  * wm_tbi_mediainit:
   11982  *
   11983  *	Initialize media for use on 1000BASE-X devices.
   11984  */
   11985 static void
   11986 wm_tbi_mediainit(struct wm_softc *sc)
   11987 {
   11988 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11989 	const char *sep = "";
   11990 
   11991 	if (sc->sc_type < WM_T_82543)
   11992 		sc->sc_tipg = TIPG_WM_DFLT;
   11993 	else
   11994 		sc->sc_tipg = TIPG_LG_DFLT;
   11995 
   11996 	sc->sc_tbi_serdes_anegticks = 5;
   11997 
   11998 	/* Initialize our media structures */
   11999 	sc->sc_mii.mii_ifp = ifp;
   12000 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12001 
   12002 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12003 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12004 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12005 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12006 		    sc->sc_core_lock);
   12007 	} else {
   12008 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12009 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12010 	}
   12011 
   12012 	/*
   12013 	 * SWD Pins:
   12014 	 *
   12015 	 *	0 = Link LED (output)
   12016 	 *	1 = Loss Of Signal (input)
   12017 	 */
   12018 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12019 
   12020 	/* XXX Perhaps this is only for TBI */
   12021 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12022 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12023 
   12024 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12025 		sc->sc_ctrl &= ~CTRL_LRST;
   12026 
   12027 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12028 
   12029 #define	ADD(ss, mm, dd)							\
   12030 do {									\
   12031 	aprint_normal("%s%s", sep, ss);					\
   12032 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12033 	sep = ", ";							\
   12034 } while (/*CONSTCOND*/0)
   12035 
   12036 	aprint_normal_dev(sc->sc_dev, "");
   12037 
   12038 	if (sc->sc_type == WM_T_I354) {
   12039 		uint32_t status;
   12040 
   12041 		status = CSR_READ(sc, WMREG_STATUS);
   12042 		if (((status & STATUS_2P5_SKU) != 0)
   12043 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12044 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12045 		} else
   12046 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12047 	} else if (sc->sc_type == WM_T_82545) {
   12048 		/* Only 82545 is LX (XXX except SFP) */
   12049 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12050 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12051 	} else if (sc->sc_sfptype != 0) {
   12052 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12053 		switch (sc->sc_sfptype) {
   12054 		default:
   12055 		case SFF_SFP_ETH_FLAGS_1000SX:
   12056 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12057 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12058 			break;
   12059 		case SFF_SFP_ETH_FLAGS_1000LX:
   12060 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12061 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12062 			break;
   12063 		case SFF_SFP_ETH_FLAGS_1000CX:
   12064 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12065 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12066 			break;
   12067 		case SFF_SFP_ETH_FLAGS_1000T:
   12068 			ADD("1000baseT", IFM_1000_T, 0);
   12069 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12070 			break;
   12071 		case SFF_SFP_ETH_FLAGS_100FX:
   12072 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12073 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12074 			break;
   12075 		}
   12076 	} else {
   12077 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12078 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12079 	}
   12080 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12081 	aprint_normal("\n");
   12082 
   12083 #undef ADD
   12084 
   12085 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12086 }
   12087 
   12088 /*
   12089  * wm_tbi_mediachange:	[ifmedia interface function]
   12090  *
   12091  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12092  */
   12093 static int
   12094 wm_tbi_mediachange(struct ifnet *ifp)
   12095 {
   12096 	struct wm_softc *sc = ifp->if_softc;
   12097 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12098 	uint32_t status, ctrl;
   12099 	bool signal;
   12100 	int i;
   12101 
   12102 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12103 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12104 		/* XXX need some work for >= 82571 and < 82575 */
   12105 		if (sc->sc_type < WM_T_82575)
   12106 			return 0;
   12107 	}
   12108 
   12109 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12110 	    || (sc->sc_type >= WM_T_82575))
   12111 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12112 
   12113 	sc->sc_ctrl &= ~CTRL_LRST;
   12114 	sc->sc_txcw = TXCW_ANE;
   12115 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12116 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12117 	else if (ife->ifm_media & IFM_FDX)
   12118 		sc->sc_txcw |= TXCW_FD;
   12119 	else
   12120 		sc->sc_txcw |= TXCW_HD;
   12121 
   12122 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12123 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12124 
    12125 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12126 		device_xname(sc->sc_dev), sc->sc_txcw));
   12127 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12128 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12129 	CSR_WRITE_FLUSH(sc);
   12130 	delay(1000);
   12131 
   12132 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12133 	signal = wm_tbi_havesignal(sc, ctrl);
   12134 
   12135 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12136 		signal));
   12137 
   12138 	if (signal) {
   12139 		/* Have signal; wait for the link to come up. */
   12140 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12141 			delay(10000);
   12142 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12143 				break;
   12144 		}
   12145 
    12146 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   12147 			device_xname(sc->sc_dev), i));
   12148 
   12149 		status = CSR_READ(sc, WMREG_STATUS);
   12150 		DPRINTF(WM_DEBUG_LINK,
   12151 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12152 			device_xname(sc->sc_dev), status, STATUS_LU));
   12153 		if (status & STATUS_LU) {
   12154 			/* Link is up. */
   12155 			DPRINTF(WM_DEBUG_LINK,
   12156 			    ("%s: LINK: set media -> link up %s\n",
   12157 				device_xname(sc->sc_dev),
   12158 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12159 
   12160 			/*
    12161 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    12162 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   12163 			 */
   12164 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12165 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12166 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12167 			if (status & STATUS_FD)
   12168 				sc->sc_tctl |=
   12169 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12170 			else
   12171 				sc->sc_tctl |=
   12172 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12173 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12174 				sc->sc_fcrtl |= FCRTL_XONE;
   12175 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12176 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12177 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12178 			sc->sc_tbi_linkup = 1;
   12179 		} else {
   12180 			if (i == WM_LINKUP_TIMEOUT)
   12181 				wm_check_for_link(sc);
   12182 			/* Link is down. */
   12183 			DPRINTF(WM_DEBUG_LINK,
   12184 			    ("%s: LINK: set media -> link down\n",
   12185 				device_xname(sc->sc_dev)));
   12186 			sc->sc_tbi_linkup = 0;
   12187 		}
   12188 	} else {
   12189 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12190 			device_xname(sc->sc_dev)));
   12191 		sc->sc_tbi_linkup = 0;
   12192 	}
   12193 
   12194 	wm_tbi_serdes_set_linkled(sc);
   12195 
   12196 	return 0;
   12197 }
   12198 
   12199 /*
   12200  * wm_tbi_mediastatus:	[ifmedia interface function]
   12201  *
   12202  *	Get the current interface media status on a 1000BASE-X device.
   12203  */
   12204 static void
   12205 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12206 {
   12207 	struct wm_softc *sc = ifp->if_softc;
   12208 	uint32_t ctrl, status;
   12209 
   12210 	ifmr->ifm_status = IFM_AVALID;
   12211 	ifmr->ifm_active = IFM_ETHER;
   12212 
   12213 	status = CSR_READ(sc, WMREG_STATUS);
   12214 	if ((status & STATUS_LU) == 0) {
   12215 		ifmr->ifm_active |= IFM_NONE;
   12216 		return;
   12217 	}
   12218 
   12219 	ifmr->ifm_status |= IFM_ACTIVE;
   12220 	/* Only 82545 is LX */
   12221 	if (sc->sc_type == WM_T_82545)
   12222 		ifmr->ifm_active |= IFM_1000_LX;
   12223 	else
   12224 		ifmr->ifm_active |= IFM_1000_SX;
   12225 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12226 		ifmr->ifm_active |= IFM_FDX;
   12227 	else
   12228 		ifmr->ifm_active |= IFM_HDX;
   12229 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12230 	if (ctrl & CTRL_RFCE)
   12231 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12232 	if (ctrl & CTRL_TFCE)
   12233 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12234 }
   12235 
   12236 /* XXX TBI only */
   12237 static int
   12238 wm_check_for_link(struct wm_softc *sc)
   12239 {
   12240 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12241 	uint32_t rxcw;
   12242 	uint32_t ctrl;
   12243 	uint32_t status;
   12244 	bool signal;
   12245 
   12246 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12247 		device_xname(sc->sc_dev), __func__));
   12248 
   12249 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12250 		/* XXX need some work for >= 82571 */
   12251 		if (sc->sc_type >= WM_T_82571) {
   12252 			sc->sc_tbi_linkup = 1;
   12253 			return 0;
   12254 		}
   12255 	}
   12256 
   12257 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12258 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12259 	status = CSR_READ(sc, WMREG_STATUS);
   12260 	signal = wm_tbi_havesignal(sc, ctrl);
   12261 
   12262 	DPRINTF(WM_DEBUG_LINK,
   12263 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12264 		device_xname(sc->sc_dev), __func__, signal,
   12265 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12266 
   12267 	/*
   12268 	 * SWDPIN   LU RXCW
   12269 	 *	0    0	  0
   12270 	 *	0    0	  1	(should not happen)
   12271 	 *	0    1	  0	(should not happen)
   12272 	 *	0    1	  1	(should not happen)
    12273 	 *	1    0	  0	Disable autonegotiation and force link up
    12274 	 *	1    0	  1	Got /C/ but no link yet
    12275 	 *	1    1	  0	(link up)
    12276 	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
   12277 	 *
   12278 	 */
   12279 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12280 		DPRINTF(WM_DEBUG_LINK,
   12281 		    ("%s: %s: force linkup and fullduplex\n",
   12282 			device_xname(sc->sc_dev), __func__));
   12283 		sc->sc_tbi_linkup = 0;
   12284 		/* Disable auto-negotiation in the TXCW register */
   12285 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12286 
   12287 		/*
   12288 		 * Force link-up and also force full-duplex.
   12289 		 *
    12290 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    12291 		 * automatically, so base sc->sc_ctrl on the value just read.
   12292 		 */
   12293 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12294 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12295 	} else if (((status & STATUS_LU) != 0)
   12296 	    && ((rxcw & RXCW_C) != 0)
   12297 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12298 		sc->sc_tbi_linkup = 1;
   12299 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12300 			device_xname(sc->sc_dev),
   12301 			__func__));
   12302 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12303 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12304 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    12305 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12306 			device_xname(sc->sc_dev), __func__));
   12307 	} else {
   12308 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12309 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12310 			status));
   12311 	}
   12312 
   12313 	return 0;
   12314 }
   12315 
   12316 /*
   12317  * wm_tbi_tick:
   12318  *
   12319  *	Check the link on TBI devices.
   12320  *	This function acts as mii_tick().
   12321  */
   12322 static void
   12323 wm_tbi_tick(struct wm_softc *sc)
   12324 {
   12325 	struct mii_data *mii = &sc->sc_mii;
   12326 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12327 	uint32_t status;
   12328 
   12329 	KASSERT(WM_CORE_LOCKED(sc));
   12330 
   12331 	status = CSR_READ(sc, WMREG_STATUS);
   12332 
   12333 	/* XXX is this needed? */
   12334 	(void)CSR_READ(sc, WMREG_RXCW);
   12335 	(void)CSR_READ(sc, WMREG_CTRL);
   12336 
   12337 	/* set link status */
   12338 	if ((status & STATUS_LU) == 0) {
   12339 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12340 			device_xname(sc->sc_dev)));
   12341 		sc->sc_tbi_linkup = 0;
   12342 	} else if (sc->sc_tbi_linkup == 0) {
   12343 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12344 			device_xname(sc->sc_dev),
   12345 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12346 		sc->sc_tbi_linkup = 1;
   12347 		sc->sc_tbi_serdes_ticks = 0;
   12348 	}
   12349 
   12350 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12351 		goto setled;
   12352 
   12353 	if ((status & STATUS_LU) == 0) {
   12354 		sc->sc_tbi_linkup = 0;
   12355 		/* If the timer expired, retry autonegotiation */
   12356 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12357 		    && (++sc->sc_tbi_serdes_ticks
   12358 			>= sc->sc_tbi_serdes_anegticks)) {
   12359 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12360 				device_xname(sc->sc_dev), __func__));
   12361 			sc->sc_tbi_serdes_ticks = 0;
   12362 			/*
   12363 			 * Reset the link, and let autonegotiation do
   12364 			 * its thing
   12365 			 */
   12366 			sc->sc_ctrl |= CTRL_LRST;
   12367 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12368 			CSR_WRITE_FLUSH(sc);
   12369 			delay(1000);
   12370 			sc->sc_ctrl &= ~CTRL_LRST;
   12371 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12372 			CSR_WRITE_FLUSH(sc);
   12373 			delay(1000);
   12374 			CSR_WRITE(sc, WMREG_TXCW,
   12375 			    sc->sc_txcw & ~TXCW_ANE);
   12376 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12377 		}
   12378 	}
   12379 
   12380 setled:
   12381 	wm_tbi_serdes_set_linkled(sc);
   12382 }
   12383 
   12384 /* SERDES related */
   12385 static void
   12386 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12387 {
   12388 	uint32_t reg;
   12389 
   12390 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12391 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12392 		return;
   12393 
   12394 	/* Enable PCS to turn on link */
   12395 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12396 	reg |= PCS_CFG_PCS_EN;
   12397 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12398 
   12399 	/* Power up the laser */
   12400 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12401 	reg &= ~CTRL_EXT_SWDPIN(3);
   12402 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12403 
   12404 	/* Flush the write to verify completion */
   12405 	CSR_WRITE_FLUSH(sc);
   12406 	delay(1000);
   12407 }
   12408 
   12409 static int
   12410 wm_serdes_mediachange(struct ifnet *ifp)
   12411 {
   12412 	struct wm_softc *sc = ifp->if_softc;
   12413 	bool pcs_autoneg = true; /* XXX */
   12414 	uint32_t ctrl_ext, pcs_lctl, reg;
   12415 
   12416 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12417 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12418 		return 0;
   12419 
   12420 	/* XXX Currently, this function is not called on 8257[12] */
   12421 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12422 	    || (sc->sc_type >= WM_T_82575))
   12423 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12424 
   12425 	/* Power on the sfp cage if present */
   12426 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12427 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12428 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12429 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12430 
   12431 	sc->sc_ctrl |= CTRL_SLU;
   12432 
   12433 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12434 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12435 
   12436 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12437 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12438 	case CTRL_EXT_LINK_MODE_SGMII:
   12439 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12440 		pcs_autoneg = true;
    12441 		/* Autoneg timeout should be disabled for SGMII mode */
   12442 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12443 		break;
   12444 	case CTRL_EXT_LINK_MODE_1000KX:
   12445 		pcs_autoneg = false;
   12446 		/* FALLTHROUGH */
   12447 	default:
   12448 		if ((sc->sc_type == WM_T_82575)
   12449 		    || (sc->sc_type == WM_T_82576)) {
   12450 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12451 				pcs_autoneg = false;
   12452 		}
   12453 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12454 		    | CTRL_FRCFDX;
   12455 
   12456 		/* Set speed of 1000/Full if speed/duplex is forced */
   12457 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12458 	}
   12459 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12460 
   12461 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12462 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12463 
   12464 	if (pcs_autoneg) {
   12465 		/* Set PCS register for autoneg */
   12466 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12467 
   12468 		/* Disable force flow control for autoneg */
   12469 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12470 
   12471 		/* Configure flow control advertisement for autoneg */
   12472 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12473 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12474 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12475 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12476 	} else
   12477 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12478 
   12479 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12480 
   12481 	return 0;
   12482 }
   12483 
   12484 static void
   12485 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12486 {
   12487 	struct wm_softc *sc = ifp->if_softc;
   12488 	struct mii_data *mii = &sc->sc_mii;
   12489 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12490 	uint32_t pcs_adv, pcs_lpab, reg;
   12491 
   12492 	ifmr->ifm_status = IFM_AVALID;
   12493 	ifmr->ifm_active = IFM_ETHER;
   12494 
   12495 	/* Check PCS */
   12496 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12497 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12498 		ifmr->ifm_active |= IFM_NONE;
   12499 		sc->sc_tbi_linkup = 0;
   12500 		goto setled;
   12501 	}
   12502 
   12503 	sc->sc_tbi_linkup = 1;
   12504 	ifmr->ifm_status |= IFM_ACTIVE;
   12505 	if (sc->sc_type == WM_T_I354) {
   12506 		uint32_t status;
   12507 
   12508 		status = CSR_READ(sc, WMREG_STATUS);
   12509 		if (((status & STATUS_2P5_SKU) != 0)
   12510 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12511 			ifmr->ifm_active |= IFM_2500_KX;
   12512 		} else
   12513 			ifmr->ifm_active |= IFM_1000_KX;
   12514 	} else {
   12515 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12516 		case PCS_LSTS_SPEED_10:
   12517 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12518 			break;
   12519 		case PCS_LSTS_SPEED_100:
   12520 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12521 			break;
   12522 		case PCS_LSTS_SPEED_1000:
   12523 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12524 			break;
   12525 		default:
   12526 			device_printf(sc->sc_dev, "Unknown speed\n");
   12527 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12528 			break;
   12529 		}
   12530 	}
   12531 	if ((reg & PCS_LSTS_FDX) != 0)
   12532 		ifmr->ifm_active |= IFM_FDX;
   12533 	else
   12534 		ifmr->ifm_active |= IFM_HDX;
   12535 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12536 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12537 		/* Check flow */
   12538 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12539 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12540 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12541 			goto setled;
   12542 		}
   12543 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12544 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12545 		DPRINTF(WM_DEBUG_LINK,
   12546 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12547 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12548 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12549 			mii->mii_media_active |= IFM_FLOW
   12550 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12551 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12552 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12553 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12554 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12555 			mii->mii_media_active |= IFM_FLOW
   12556 			    | IFM_ETH_TXPAUSE;
   12557 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12558 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12559 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12560 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12561 			mii->mii_media_active |= IFM_FLOW
   12562 			    | IFM_ETH_RXPAUSE;
   12563 		}
   12564 	}
   12565 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12566 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12567 setled:
   12568 	wm_tbi_serdes_set_linkled(sc);
   12569 }
   12570 
   12571 /*
   12572  * wm_serdes_tick:
   12573  *
   12574  *	Check the link on serdes devices.
   12575  */
   12576 static void
   12577 wm_serdes_tick(struct wm_softc *sc)
   12578 {
   12579 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12580 	struct mii_data *mii = &sc->sc_mii;
   12581 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12582 	uint32_t reg;
   12583 
   12584 	KASSERT(WM_CORE_LOCKED(sc));
   12585 
   12586 	mii->mii_media_status = IFM_AVALID;
   12587 	mii->mii_media_active = IFM_ETHER;
   12588 
   12589 	/* Check PCS */
   12590 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12591 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12592 		mii->mii_media_status |= IFM_ACTIVE;
   12593 		sc->sc_tbi_linkup = 1;
   12594 		sc->sc_tbi_serdes_ticks = 0;
   12595 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12596 		if ((reg & PCS_LSTS_FDX) != 0)
   12597 			mii->mii_media_active |= IFM_FDX;
   12598 		else
   12599 			mii->mii_media_active |= IFM_HDX;
   12600 	} else {
    12601 		mii->mii_media_active |= IFM_NONE;
   12602 		sc->sc_tbi_linkup = 0;
   12603 		/* If the timer expired, retry autonegotiation */
   12604 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12605 		    && (++sc->sc_tbi_serdes_ticks
   12606 			>= sc->sc_tbi_serdes_anegticks)) {
   12607 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12608 				device_xname(sc->sc_dev), __func__));
   12609 			sc->sc_tbi_serdes_ticks = 0;
   12610 			/* XXX */
   12611 			wm_serdes_mediachange(ifp);
   12612 		}
   12613 	}
   12614 
   12615 	wm_tbi_serdes_set_linkled(sc);
   12616 }
   12617 
   12618 /* SFP related */
   12619 
   12620 static int
   12621 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12622 {
   12623 	uint32_t i2ccmd;
   12624 	int i;
   12625 
   12626 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12627 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12628 
   12629 	/* Poll the ready bit */
   12630 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12631 		delay(50);
   12632 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12633 		if (i2ccmd & I2CCMD_READY)
   12634 			break;
   12635 	}
   12636 	if ((i2ccmd & I2CCMD_READY) == 0)
   12637 		return -1;
   12638 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12639 		return -1;
   12640 
   12641 	*data = i2ccmd & 0x00ff;
   12642 
   12643 	return 0;
   12644 }
   12645 
   12646 static uint32_t
   12647 wm_sfp_get_media_type(struct wm_softc *sc)
   12648 {
   12649 	uint32_t ctrl_ext;
   12650 	uint8_t val = 0;
   12651 	int timeout = 3;
   12652 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12653 	int rv = -1;
   12654 
   12655 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12656 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12657 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12658 	CSR_WRITE_FLUSH(sc);
   12659 
   12660 	/* Read SFP module data */
   12661 	while (timeout) {
   12662 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12663 		if (rv == 0)
   12664 			break;
   12665 		delay(100*1000); /* XXX too big */
   12666 		timeout--;
   12667 	}
   12668 	if (rv != 0)
   12669 		goto out;
   12670 
   12671 	switch (val) {
   12672 	case SFF_SFP_ID_SFF:
   12673 		aprint_normal_dev(sc->sc_dev,
   12674 		    "Module/Connector soldered to board\n");
   12675 		break;
   12676 	case SFF_SFP_ID_SFP:
   12677 		sc->sc_flags |= WM_F_SFP;
   12678 		break;
   12679 	case SFF_SFP_ID_UNKNOWN:
   12680 		goto out;
   12681 	default:
   12682 		break;
   12683 	}
   12684 
   12685 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12686 	if (rv != 0)
   12687 		goto out;
   12688 
   12689 	sc->sc_sfptype = val;
   12690 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12691 		mediatype = WM_MEDIATYPE_SERDES;
   12692 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12693 		sc->sc_flags |= WM_F_SGMII;
   12694 		mediatype = WM_MEDIATYPE_COPPER;
   12695 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12696 		sc->sc_flags |= WM_F_SGMII;
   12697 		mediatype = WM_MEDIATYPE_SERDES;
   12698 	} else {
   12699 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12700 		    __func__, sc->sc_sfptype);
   12701 		sc->sc_sfptype = 0; /* XXX unknown */
   12702 	}
   12703 
   12704 out:
   12705 	/* Restore I2C interface setting */
   12706 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12707 
   12708 	return mediatype;
   12709 }
   12710 
   12711 /*
   12712  * NVM related.
   12713  * Microwire, SPI (w/wo EERD) and Flash.
   12714  */
   12715 
   12716 /* Both spi and uwire */
   12717 
   12718 /*
   12719  * wm_eeprom_sendbits:
   12720  *
   12721  *	Send a series of bits to the EEPROM.
   12722  */
   12723 static void
   12724 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12725 {
   12726 	uint32_t reg;
   12727 	int x;
   12728 
   12729 	reg = CSR_READ(sc, WMREG_EECD);
   12730 
   12731 	for (x = nbits; x > 0; x--) {
   12732 		if (bits & (1U << (x - 1)))
   12733 			reg |= EECD_DI;
   12734 		else
   12735 			reg &= ~EECD_DI;
   12736 		CSR_WRITE(sc, WMREG_EECD, reg);
   12737 		CSR_WRITE_FLUSH(sc);
   12738 		delay(2);
   12739 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12740 		CSR_WRITE_FLUSH(sc);
   12741 		delay(2);
   12742 		CSR_WRITE(sc, WMREG_EECD, reg);
   12743 		CSR_WRITE_FLUSH(sc);
   12744 		delay(2);
   12745 	}
   12746 }
   12747 
   12748 /*
   12749  * wm_eeprom_recvbits:
   12750  *
   12751  *	Receive a series of bits from the EEPROM.
   12752  */
   12753 static void
   12754 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12755 {
   12756 	uint32_t reg, val;
   12757 	int x;
   12758 
   12759 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12760 
   12761 	val = 0;
   12762 	for (x = nbits; x > 0; x--) {
   12763 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12764 		CSR_WRITE_FLUSH(sc);
   12765 		delay(2);
   12766 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12767 			val |= (1U << (x - 1));
   12768 		CSR_WRITE(sc, WMREG_EECD, reg);
   12769 		CSR_WRITE_FLUSH(sc);
   12770 		delay(2);
   12771 	}
   12772 	*valp = val;
   12773 }
   12774 
   12775 /* Microwire */
   12776 
   12777 /*
   12778  * wm_nvm_read_uwire:
   12779  *
   12780  *	Read a word from the EEPROM using the MicroWire protocol.
   12781  */
   12782 static int
   12783 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12784 {
   12785 	uint32_t reg, val;
   12786 	int i;
   12787 
   12788 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12789 		device_xname(sc->sc_dev), __func__));
   12790 
   12791 	if (sc->nvm.acquire(sc) != 0)
   12792 		return -1;
   12793 
   12794 	for (i = 0; i < wordcnt; i++) {
   12795 		/* Clear SK and DI. */
   12796 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12797 		CSR_WRITE(sc, WMREG_EECD, reg);
   12798 
   12799 		/*
   12800 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12801 		 * and Xen.
   12802 		 *
    12803 		 * We use this workaround only for 82540 because qemu's
    12804 		 * e1000 acts as an 82540.
   12805 		 */
   12806 		if (sc->sc_type == WM_T_82540) {
   12807 			reg |= EECD_SK;
   12808 			CSR_WRITE(sc, WMREG_EECD, reg);
   12809 			reg &= ~EECD_SK;
   12810 			CSR_WRITE(sc, WMREG_EECD, reg);
   12811 			CSR_WRITE_FLUSH(sc);
   12812 			delay(2);
   12813 		}
   12814 		/* XXX: end of workaround */
   12815 
   12816 		/* Set CHIP SELECT. */
   12817 		reg |= EECD_CS;
   12818 		CSR_WRITE(sc, WMREG_EECD, reg);
   12819 		CSR_WRITE_FLUSH(sc);
   12820 		delay(2);
   12821 
   12822 		/* Shift in the READ command. */
   12823 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12824 
   12825 		/* Shift in address. */
   12826 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12827 
   12828 		/* Shift out the data. */
   12829 		wm_eeprom_recvbits(sc, &val, 16);
   12830 		data[i] = val & 0xffff;
   12831 
   12832 		/* Clear CHIP SELECT. */
   12833 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12834 		CSR_WRITE(sc, WMREG_EECD, reg);
   12835 		CSR_WRITE_FLUSH(sc);
   12836 		delay(2);
   12837 	}
   12838 
   12839 	sc->nvm.release(sc);
   12840 	return 0;
   12841 }
   12842 
   12843 /* SPI */
   12844 
   12845 /*
   12846  * Set SPI and FLASH related information from the EECD register.
   12847  * For 82541 and 82547, the word size is taken from EEPROM.
   12848  */
   12849 static int
   12850 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12851 {
   12852 	int size;
   12853 	uint32_t reg;
   12854 	uint16_t data;
   12855 
   12856 	reg = CSR_READ(sc, WMREG_EECD);
   12857 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12858 
   12859 	/* Read the size of NVM from EECD by default */
   12860 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12861 	switch (sc->sc_type) {
   12862 	case WM_T_82541:
   12863 	case WM_T_82541_2:
   12864 	case WM_T_82547:
   12865 	case WM_T_82547_2:
   12866 		/* Set dummy value to access EEPROM */
   12867 		sc->sc_nvm_wordsize = 64;
   12868 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12869 			aprint_error_dev(sc->sc_dev,
   12870 			    "%s: failed to read EEPROM size\n", __func__);
   12871 		}
   12872 		reg = data;
   12873 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12874 		if (size == 0)
    12875 			size = 6; /* 64 words */
   12876 		else
   12877 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12878 		break;
   12879 	case WM_T_80003:
   12880 	case WM_T_82571:
   12881 	case WM_T_82572:
   12882 	case WM_T_82573: /* SPI case */
   12883 	case WM_T_82574: /* SPI case */
   12884 	case WM_T_82583: /* SPI case */
   12885 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12886 		if (size > 14)
   12887 			size = 14;
   12888 		break;
   12889 	case WM_T_82575:
   12890 	case WM_T_82576:
   12891 	case WM_T_82580:
   12892 	case WM_T_I350:
   12893 	case WM_T_I354:
   12894 	case WM_T_I210:
   12895 	case WM_T_I211:
   12896 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12897 		if (size > 15)
   12898 			size = 15;
   12899 		break;
   12900 	default:
   12901 		aprint_error_dev(sc->sc_dev,
   12902 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12903 		return -1;
   12904 		break;
   12905 	}
   12906 
   12907 	sc->sc_nvm_wordsize = 1 << size;
   12908 
   12909 	return 0;
   12910 }
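
          /*
           * Worked example (illustrative): with NVM_WORD_SIZE_BASE_SHIFT == 6,
           * an EECD_EE_SIZE_EX field of 2 on an 82571 gives size = 2 + 6 = 8,
           * so sc_nvm_wordsize = 1 << 8 = 256 16-bit words (a 512-byte SPI
           * EEPROM).  The clamps above cap the result at 1 << 14 (16K words)
           * for the 8257[1-4]/80003 group and 1 << 15 (32K words) for 82575
           * and later.
           */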
   12911 
   12912 /*
   12913  * wm_nvm_ready_spi:
   12914  *
   12915  *	Wait for a SPI EEPROM to be ready for commands.
   12916  */
   12917 static int
   12918 wm_nvm_ready_spi(struct wm_softc *sc)
   12919 {
   12920 	uint32_t val;
   12921 	int usec;
   12922 
   12923 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12924 		device_xname(sc->sc_dev), __func__));
   12925 
   12926 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12927 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12928 		wm_eeprom_recvbits(sc, &val, 8);
   12929 		if ((val & SPI_SR_RDY) == 0)
   12930 			break;
   12931 	}
   12932 	if (usec >= SPI_MAX_RETRIES) {
    12933 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12934 		return -1;
   12935 	}
   12936 	return 0;
   12937 }
   12938 
   12939 /*
   12940  * wm_nvm_read_spi:
   12941  *
    12942  *	Read a word from the EEPROM using the SPI protocol.
   12943  */
   12944 static int
   12945 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12946 {
   12947 	uint32_t reg, val;
   12948 	int i;
   12949 	uint8_t opc;
   12950 	int rv = 0;
   12951 
   12952 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12953 		device_xname(sc->sc_dev), __func__));
   12954 
   12955 	if (sc->nvm.acquire(sc) != 0)
   12956 		return -1;
   12957 
   12958 	/* Clear SK and CS. */
   12959 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12960 	CSR_WRITE(sc, WMREG_EECD, reg);
   12961 	CSR_WRITE_FLUSH(sc);
   12962 	delay(2);
   12963 
   12964 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12965 		goto out;
   12966 
   12967 	/* Toggle CS to flush commands. */
   12968 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12969 	CSR_WRITE_FLUSH(sc);
   12970 	delay(2);
   12971 	CSR_WRITE(sc, WMREG_EECD, reg);
   12972 	CSR_WRITE_FLUSH(sc);
   12973 	delay(2);
   12974 
   12975 	opc = SPI_OPC_READ;
   12976 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12977 		opc |= SPI_OPC_A8;
   12978 
   12979 	wm_eeprom_sendbits(sc, opc, 8);
   12980 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12981 
   12982 	for (i = 0; i < wordcnt; i++) {
   12983 		wm_eeprom_recvbits(sc, &val, 16);
   12984 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12985 	}
   12986 
   12987 	/* Raise CS and clear SK. */
   12988 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12989 	CSR_WRITE(sc, WMREG_EECD, reg);
   12990 	CSR_WRITE_FLUSH(sc);
   12991 	delay(2);
   12992 
   12993 out:
   12994 	sc->nvm.release(sc);
   12995 	return rv;
   12996 }
   12997 
    12998 /* Reading via the EERD register */
   12999 
   13000 static int
   13001 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13002 {
   13003 	uint32_t attempts = 100000;
   13004 	uint32_t i, reg = 0;
   13005 	int32_t done = -1;
   13006 
   13007 	for (i = 0; i < attempts; i++) {
   13008 		reg = CSR_READ(sc, rw);
   13009 
   13010 		if (reg & EERD_DONE) {
   13011 			done = 0;
   13012 			break;
   13013 		}
   13014 		delay(5);
   13015 	}
   13016 
   13017 	return done;
   13018 }
   13019 
   13020 static int
   13021 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13022 {
   13023 	int i, eerd = 0;
   13024 	int rv = 0;
   13025 
   13026 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13027 		device_xname(sc->sc_dev), __func__));
   13028 
   13029 	if (sc->nvm.acquire(sc) != 0)
   13030 		return -1;
   13031 
   13032 	for (i = 0; i < wordcnt; i++) {
   13033 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13034 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13035 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13036 		if (rv != 0) {
   13037 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13038 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13039 			break;
   13040 		}
   13041 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13042 	}
   13043 
   13044 	sc->nvm.release(sc);
   13045 	return rv;
   13046 }
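
          /*
           * Sketch (illustrative only, not compiled; the function name is
           * hypothetical): a single-word read via EERD without the loop in
           * wm_nvm_read_eerd() above.  The word address is written together
           * with EERD_START, EERD_DONE is polled, and the data word comes
           * back in the upper half of the register.  The real path also
           * brackets the access with sc->nvm.acquire()/release().
           */
          #if 0
          static int
          wm_eerd_read_one(struct wm_softc *sc, int off, uint16_t *datap)
          {

          	CSR_WRITE(sc, WMREG_EERD, (off << EERD_ADDR_SHIFT) | EERD_START);
          	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) != 0)
          		return -1;
          	*datap = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
          	return 0;
          }
          #endif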
   13047 
   13048 /* Flash */
   13049 
   13050 static int
   13051 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13052 {
   13053 	uint32_t eecd;
   13054 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13055 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13056 	uint32_t nvm_dword = 0;
   13057 	uint8_t sig_byte = 0;
   13058 	int rv;
   13059 
   13060 	switch (sc->sc_type) {
   13061 	case WM_T_PCH_SPT:
   13062 	case WM_T_PCH_CNP:
   13063 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13064 		act_offset = ICH_NVM_SIG_WORD * 2;
   13065 
   13066 		/* Set bank to 0 in case flash read fails. */
   13067 		*bank = 0;
   13068 
   13069 		/* Check bank 0 */
   13070 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13071 		if (rv != 0)
   13072 			return rv;
   13073 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13074 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13075 			*bank = 0;
   13076 			return 0;
   13077 		}
   13078 
   13079 		/* Check bank 1 */
    13080 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13081 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    13082 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13083 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13084 			*bank = 1;
   13085 			return 0;
   13086 		}
   13087 		aprint_error_dev(sc->sc_dev,
   13088 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13089 		return -1;
   13090 	case WM_T_ICH8:
   13091 	case WM_T_ICH9:
   13092 		eecd = CSR_READ(sc, WMREG_EECD);
   13093 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13094 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13095 			return 0;
   13096 		}
   13097 		/* FALLTHROUGH */
   13098 	default:
   13099 		/* Default to 0 */
   13100 		*bank = 0;
   13101 
   13102 		/* Check bank 0 */
   13103 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13104 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13105 			*bank = 0;
   13106 			return 0;
   13107 		}
   13108 
   13109 		/* Check bank 1 */
   13110 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13111 		    &sig_byte);
   13112 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13113 			*bank = 1;
   13114 			return 0;
   13115 		}
   13116 	}
   13117 
   13118 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13119 		device_xname(sc->sc_dev)));
   13120 	return -1;
   13121 }
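
          /*
           * Worked example (illustrative): each flash bank carries a
           * signature byte at word ICH_NVM_SIG_WORD whose two signature bits
           * must read binary 10 for the bank to be valid.  With a bank size
           * of sc_ich8_flash_bank_size words, bank 1's signature lives
           * sc_ich8_flash_bank_size * 2 bytes after bank 0's, which is
           * exactly the bank1_offset computed above.
           */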
   13122 
   13123 /******************************************************************************
   13124  * This function does initial flash setup so that a new read/write/erase cycle
   13125  * can be started.
   13126  *
   13127  * sc - The pointer to the hw structure
   13128  ****************************************************************************/
   13129 static int32_t
   13130 wm_ich8_cycle_init(struct wm_softc *sc)
   13131 {
   13132 	uint16_t hsfsts;
   13133 	int32_t error = 1;
   13134 	int32_t i     = 0;
   13135 
   13136 	if (sc->sc_type >= WM_T_PCH_SPT)
   13137 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13138 	else
   13139 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13140 
    13141 	/* Check that the Flash Descriptor Valid bit in Hw status is set */
   13142 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13143 		return error;
   13144 
   13145 	/* Clear FCERR in Hw status by writing 1 */
   13146 	/* Clear DAEL in Hw status by writing a 1 */
   13147 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13148 
   13149 	if (sc->sc_type >= WM_T_PCH_SPT)
   13150 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13151 	else
   13152 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13153 
    13154 	/*
    13155 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13156 	 * check against in order to start a new cycle, or the FDONE bit
    13157 	 * should be changed in the hardware so that it reads as 1 after a
    13158 	 * hardware reset, which could then indicate whether a cycle is in
    13159 	 * progress or has been completed.  We should also have a software
    13160 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    13161 	 * so that two threads' accesses to those bits are serialized, and
    13162 	 * so that two threads cannot start a cycle at the same time.
    13163 	 */
   13164 
   13165 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13166 		/*
   13167 		 * There is no cycle running at present, so we can start a
   13168 		 * cycle
   13169 		 */
   13170 
   13171 		/* Begin by setting Flash Cycle Done. */
   13172 		hsfsts |= HSFSTS_DONE;
   13173 		if (sc->sc_type >= WM_T_PCH_SPT)
   13174 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13175 			    hsfsts & 0xffffUL);
   13176 		else
   13177 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13178 		error = 0;
   13179 	} else {
   13180 		/*
    13181 		 * Otherwise poll for some time so the current cycle has a
   13182 		 * chance to end before giving up.
   13183 		 */
   13184 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13185 			if (sc->sc_type >= WM_T_PCH_SPT)
   13186 				hsfsts = ICH8_FLASH_READ32(sc,
   13187 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13188 			else
   13189 				hsfsts = ICH8_FLASH_READ16(sc,
   13190 				    ICH_FLASH_HSFSTS);
   13191 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13192 				error = 0;
   13193 				break;
   13194 			}
   13195 			delay(1);
   13196 		}
   13197 		if (error == 0) {
   13198 			/*
    13199 			 * The previous cycle finished within the timeout,
    13200 			 * so now set the Flash Cycle Done.
   13201 			 */
   13202 			hsfsts |= HSFSTS_DONE;
   13203 			if (sc->sc_type >= WM_T_PCH_SPT)
   13204 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13205 				    hsfsts & 0xffffUL);
   13206 			else
   13207 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13208 				    hsfsts);
   13209 		}
   13210 	}
   13211 	return error;
   13212 }
   13213 
   13214 /******************************************************************************
   13215  * This function starts a flash cycle and waits for its completion
   13216  *
   13217  * sc - The pointer to the hw structure
   13218  ****************************************************************************/
   13219 static int32_t
   13220 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13221 {
   13222 	uint16_t hsflctl;
   13223 	uint16_t hsfsts;
   13224 	int32_t error = 1;
   13225 	uint32_t i = 0;
   13226 
   13227 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13228 	if (sc->sc_type >= WM_T_PCH_SPT)
   13229 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13230 	else
   13231 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13232 	hsflctl |= HSFCTL_GO;
   13233 	if (sc->sc_type >= WM_T_PCH_SPT)
   13234 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13235 		    (uint32_t)hsflctl << 16);
   13236 	else
   13237 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13238 
   13239 	/* Wait till FDONE bit is set to 1 */
   13240 	do {
   13241 		if (sc->sc_type >= WM_T_PCH_SPT)
   13242 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13243 			    & 0xffffUL;
   13244 		else
   13245 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13246 		if (hsfsts & HSFSTS_DONE)
   13247 			break;
   13248 		delay(1);
   13249 		i++;
   13250 	} while (i < timeout);
    13251 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13252 		error = 0;
   13253 
   13254 	return error;
   13255 }
   13256 
   13257 /******************************************************************************
   13258  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13259  *
   13260  * sc - The pointer to the hw structure
   13261  * index - The index of the byte or word to read.
    13262  * size - Size of data to read: 1=byte, 2=word, 4=dword
   13263  * data - Pointer to the word to store the value read.
   13264  *****************************************************************************/
   13265 static int32_t
   13266 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13267     uint32_t size, uint32_t *data)
   13268 {
   13269 	uint16_t hsfsts;
   13270 	uint16_t hsflctl;
   13271 	uint32_t flash_linear_address;
   13272 	uint32_t flash_data = 0;
   13273 	int32_t error = 1;
   13274 	int32_t count = 0;
   13275 
    13276 	if (size < 1 || size > 4 || data == NULL ||
   13277 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13278 		return error;
   13279 
   13280 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13281 	    sc->sc_ich8_flash_base;
   13282 
   13283 	do {
   13284 		delay(1);
   13285 		/* Steps */
   13286 		error = wm_ich8_cycle_init(sc);
   13287 		if (error)
   13288 			break;
   13289 
   13290 		if (sc->sc_type >= WM_T_PCH_SPT)
   13291 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13292 			    >> 16;
   13293 		else
   13294 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13295 		/* The byte count field encodes the transfer size minus one. */
    13296 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
    13297 		    & HSFCTL_BCOUNT_MASK;
   13298 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13299 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13300 			/*
    13301 			 * In SPT, this register is in the LAN memory space,
    13302 			 * not flash.  Therefore, only 32-bit access is supported.
   13303 			 */
   13304 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13305 			    (uint32_t)hsflctl << 16);
   13306 		} else
   13307 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13308 
   13309 		/*
   13310 		 * Write the last 24 bits of index into Flash Linear address
   13311 		 * field in Flash Address
   13312 		 */
   13313 		/* TODO: TBD maybe check the index against the size of flash */
   13314 
   13315 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13316 
   13317 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13318 
    13319 		/*
    13320 		 * If FCERR is set, clear it and retry the whole sequence
    13321 		 * a few more times; otherwise read the result out of the
    13322 		 * Flash Data0 register.  The data comes back least
    13323 		 * significant byte first.
    13324 		 */
   13325 		if (error == 0) {
   13326 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13327 			if (size == 1)
   13328 				*data = (uint8_t)(flash_data & 0x000000FF);
   13329 			else if (size == 2)
   13330 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13331 			else if (size == 4)
   13332 				*data = (uint32_t)flash_data;
   13333 			break;
   13334 		} else {
   13335 			/*
   13336 			 * If we've gotten here, then things are probably
   13337 			 * completely hosed, but if the error condition is
   13338 			 * detected, it won't hurt to give it another try...
   13339 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13340 			 */
   13341 			if (sc->sc_type >= WM_T_PCH_SPT)
   13342 				hsfsts = ICH8_FLASH_READ32(sc,
   13343 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13344 			else
   13345 				hsfsts = ICH8_FLASH_READ16(sc,
   13346 				    ICH_FLASH_HSFSTS);
   13347 
   13348 			if (hsfsts & HSFSTS_ERR) {
   13349 				/* Repeat for some time before giving up. */
   13350 				continue;
   13351 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13352 				break;
   13353 		}
   13354 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13355 
   13356 	return error;
   13357 }
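
          /*
           * Summary (informational): one flash read through the registers
           * above is, in outline:
           *
           *	wm_ich8_cycle_init()	clear stale FCERR/DAEL, set FDONE
           *	HSFCTL			byte count - 1, ICH_CYCLE_READ
           *	FADDR			linear flash address
           *	wm_ich8_flash_cycle()	set GO, poll FDONE
           *	FDATA0			1, 2 or 4 bytes of data
           *
           * with the whole sequence retried up to
           * ICH_FLASH_CYCLE_REPEAT_COUNT times when FCERR reports a failed
           * cycle.
           */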
   13358 
   13359 /******************************************************************************
   13360  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13361  *
   13362  * sc - pointer to wm_hw structure
   13363  * index - The index of the byte to read.
   13364  * data - Pointer to a byte to store the value read.
   13365  *****************************************************************************/
   13366 static int32_t
   13367 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13368 {
   13369 	int32_t status;
   13370 	uint32_t word = 0;
   13371 
   13372 	status = wm_read_ich8_data(sc, index, 1, &word);
   13373 	if (status == 0)
   13374 		*data = (uint8_t)word;
   13375 	else
   13376 		*data = 0;
   13377 
   13378 	return status;
   13379 }
   13380 
   13381 /******************************************************************************
   13382  * Reads a word from the NVM using the ICH8 flash access registers.
   13383  *
   13384  * sc - pointer to wm_hw structure
   13385  * index - The starting byte index of the word to read.
   13386  * data - Pointer to a word to store the value read.
   13387  *****************************************************************************/
   13388 static int32_t
   13389 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13390 {
   13391 	int32_t status;
   13392 	uint32_t word = 0;
   13393 
   13394 	status = wm_read_ich8_data(sc, index, 2, &word);
   13395 	if (status == 0)
   13396 		*data = (uint16_t)word;
   13397 	else
   13398 		*data = 0;
   13399 
   13400 	return status;
   13401 }
   13402 
   13403 /******************************************************************************
   13404  * Reads a dword from the NVM using the ICH8 flash access registers.
   13405  *
   13406  * sc - pointer to wm_hw structure
    13407  * index - The starting byte index of the dword to read.
    13408  * data - Pointer to a dword to store the value read.
   13409  *****************************************************************************/
   13410 static int32_t
   13411 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13412 {
   13413 	int32_t status;
   13414 
   13415 	status = wm_read_ich8_data(sc, index, 4, data);
   13416 	return status;
   13417 }
   13418 
   13419 /******************************************************************************
   13420  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13421  * register.
   13422  *
   13423  * sc - Struct containing variables accessed by shared code
   13424  * offset - offset of word in the EEPROM to read
   13425  * data - word read from the EEPROM
   13426  * words - number of words to read
   13427  *****************************************************************************/
   13428 static int
   13429 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13430 {
   13431 	int32_t	 rv = 0;
   13432 	uint32_t flash_bank = 0;
   13433 	uint32_t act_offset = 0;
   13434 	uint32_t bank_offset = 0;
   13435 	uint16_t word = 0;
   13436 	uint16_t i = 0;
   13437 
   13438 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13439 		device_xname(sc->sc_dev), __func__));
   13440 
   13441 	if (sc->nvm.acquire(sc) != 0)
   13442 		return -1;
   13443 
   13444 	/*
   13445 	 * We need to know which is the valid flash bank.  In the event
   13446 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13447 	 * managing flash_bank. So it cannot be trusted and needs
   13448 	 * to be updated with each read.
   13449 	 */
   13450 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13451 	if (rv) {
   13452 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13453 			device_xname(sc->sc_dev)));
   13454 		flash_bank = 0;
   13455 	}
   13456 
    13457 	/*
    13458 	 * Adjust the offset if we're on bank 1, converting the bank size
    13459 	 * from words to bytes
    13460 	 */
   13461 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13462 
   13463 	for (i = 0; i < words; i++) {
   13464 		/* The NVM part needs a byte offset, hence * 2 */
   13465 		act_offset = bank_offset + ((offset + i) * 2);
   13466 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13467 		if (rv) {
   13468 			aprint_error_dev(sc->sc_dev,
   13469 			    "%s: failed to read NVM\n", __func__);
   13470 			break;
   13471 		}
   13472 		data[i] = word;
   13473 	}
   13474 
   13475 	sc->nvm.release(sc);
   13476 	return rv;
   13477 }
   13478 
   13479 /******************************************************************************
   13480  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13481  * register.
   13482  *
   13483  * sc - Struct containing variables accessed by shared code
   13484  * offset - offset of word in the EEPROM to read
   13485  * data - word read from the EEPROM
   13486  * words - number of words to read
   13487  *****************************************************************************/
   13488 static int
   13489 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13490 {
   13491 	int32_t	 rv = 0;
   13492 	uint32_t flash_bank = 0;
   13493 	uint32_t act_offset = 0;
   13494 	uint32_t bank_offset = 0;
   13495 	uint32_t dword = 0;
   13496 	uint16_t i = 0;
   13497 
   13498 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13499 		device_xname(sc->sc_dev), __func__));
   13500 
   13501 	if (sc->nvm.acquire(sc) != 0)
   13502 		return -1;
   13503 
   13504 	/*
   13505 	 * We need to know which is the valid flash bank.  In the event
   13506 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13507 	 * managing flash_bank. So it cannot be trusted and needs
   13508 	 * to be updated with each read.
   13509 	 */
   13510 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13511 	if (rv) {
   13512 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13513 			device_xname(sc->sc_dev)));
   13514 		flash_bank = 0;
   13515 	}
   13516 
    13517 	/*
    13518 	 * Adjust the offset if we're on bank 1, converting the bank size
    13519 	 * from words to bytes
    13520 	 */
   13521 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13522 
   13523 	for (i = 0; i < words; i++) {
   13524 		/* The NVM part needs a byte offset, hence * 2 */
   13525 		act_offset = bank_offset + ((offset + i) * 2);
   13526 		/* but we must read dword aligned, so mask ... */
   13527 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13528 		if (rv) {
   13529 			aprint_error_dev(sc->sc_dev,
   13530 			    "%s: failed to read NVM\n", __func__);
   13531 			break;
   13532 		}
   13533 		/* ... and pick out low or high word */
   13534 		if ((act_offset & 0x2) == 0)
   13535 			data[i] = (uint16_t)(dword & 0xFFFF);
   13536 		else
   13537 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13538 	}
   13539 
   13540 	sc->nvm.release(sc);
   13541 	return rv;
   13542 }
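
          /*
           * Worked example (illustrative): reading NVM word 5 from bank 0.
           * act_offset = 5 * 2 = 10 bytes; the dword read is issued at
           * 10 & ~0x3 = 8, and since 10 & 0x2 is set, the result is taken
           * from the high 16 bits of that dword.  Word 4 would come from the
           * low half of the same dword.
           */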
   13543 
   13544 /* iNVM */
   13545 
   13546 static int
   13547 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13548 {
    13549 	int32_t	 rv = -1;
   13550 	uint32_t invm_dword;
   13551 	uint16_t i;
   13552 	uint8_t record_type, word_address;
   13553 
   13554 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13555 		device_xname(sc->sc_dev), __func__));
   13556 
   13557 	for (i = 0; i < INVM_SIZE; i++) {
   13558 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13559 		/* Get record type */
   13560 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13561 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13562 			break;
   13563 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13564 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13565 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13566 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13567 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13568 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13569 			if (word_address == address) {
   13570 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13571 				rv = 0;
   13572 				break;
   13573 			}
   13574 		}
   13575 	}
   13576 
   13577 	return rv;
   13578 }
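
          /*
           * Note (informational): each 32-bit iNVM word is a self-describing
           * record -- a record type in its low bits, and, for word-autoload
           * records, a target word address plus 16 bits of data in the high
           * half (see the INVM_DWORD_TO_* macros).  The loop above walks the
           * records, skipping over the multi-dword CSR-autoload and RSA-key
           * structures, until it finds the requested word address.
           */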
   13579 
   13580 static int
   13581 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13582 {
   13583 	int rv = 0;
   13584 	int i;
   13585 
   13586 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13587 		device_xname(sc->sc_dev), __func__));
   13588 
   13589 	if (sc->nvm.acquire(sc) != 0)
   13590 		return -1;
   13591 
   13592 	for (i = 0; i < words; i++) {
   13593 		switch (offset + i) {
   13594 		case NVM_OFF_MACADDR:
   13595 		case NVM_OFF_MACADDR1:
   13596 		case NVM_OFF_MACADDR2:
   13597 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13598 			if (rv != 0) {
   13599 				data[i] = 0xffff;
   13600 				rv = -1;
   13601 			}
   13602 			break;
   13603 		case NVM_OFF_CFG2:
   13604 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13605 			if (rv != 0) {
   13606 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13607 				rv = 0;
   13608 			}
   13609 			break;
   13610 		case NVM_OFF_CFG4:
   13611 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13612 			if (rv != 0) {
   13613 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13614 				rv = 0;
   13615 			}
   13616 			break;
   13617 		case NVM_OFF_LED_1_CFG:
   13618 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13619 			if (rv != 0) {
   13620 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13621 				rv = 0;
   13622 			}
   13623 			break;
   13624 		case NVM_OFF_LED_0_2_CFG:
   13625 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13626 			if (rv != 0) {
   13627 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13628 				rv = 0;
   13629 			}
   13630 			break;
   13631 		case NVM_OFF_ID_LED_SETTINGS:
   13632 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13633 			if (rv != 0) {
   13634 				*data = ID_LED_RESERVED_FFFF;
   13635 				rv = 0;
   13636 			}
   13637 			break;
   13638 		default:
   13639 			DPRINTF(WM_DEBUG_NVM,
   13640 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13641 			*data = NVM_RESERVED_WORD;
   13642 			break;
   13643 		}
   13644 	}
   13645 
   13646 	sc->nvm.release(sc);
   13647 	return rv;
   13648 }
   13649 
    13650 /* Locking, NVM type detection, checksum validation, version and read */
   13651 
   13652 static int
   13653 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13654 {
   13655 	uint32_t eecd = 0;
   13656 
   13657 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13658 	    || sc->sc_type == WM_T_82583) {
   13659 		eecd = CSR_READ(sc, WMREG_EECD);
   13660 
   13661 		/* Isolate bits 15 & 16 */
   13662 		eecd = ((eecd >> 15) & 0x03);
   13663 
   13664 		/* If both bits are set, device is Flash type */
   13665 		if (eecd == 0x03)
   13666 			return 0;
   13667 	}
   13668 	return 1;
   13669 }
   13670 
   13671 static int
   13672 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13673 {
   13674 	uint32_t eec;
   13675 
   13676 	eec = CSR_READ(sc, WMREG_EEC);
   13677 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13678 		return 1;
   13679 
   13680 	return 0;
   13681 }
   13682 
   13683 /*
   13684  * wm_nvm_validate_checksum
   13685  *
   13686  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13687  */
   13688 static int
   13689 wm_nvm_validate_checksum(struct wm_softc *sc)
   13690 {
   13691 	uint16_t checksum;
   13692 	uint16_t eeprom_data;
   13693 #ifdef WM_DEBUG
   13694 	uint16_t csum_wordaddr, valid_checksum;
   13695 #endif
   13696 	int i;
   13697 
   13698 	checksum = 0;
   13699 
   13700 	/* Don't check for I211 */
   13701 	if (sc->sc_type == WM_T_I211)
   13702 		return 0;
   13703 
   13704 #ifdef WM_DEBUG
   13705 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13706 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13707 		csum_wordaddr = NVM_OFF_COMPAT;
   13708 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13709 	} else {
   13710 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13711 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13712 	}
   13713 
   13714 	/* Dump EEPROM image for debug */
   13715 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13716 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13717 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13718 		/* XXX PCH_SPT? */
   13719 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13720 		if ((eeprom_data & valid_checksum) == 0)
   13721 			DPRINTF(WM_DEBUG_NVM,
   13722 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13723 				device_xname(sc->sc_dev), eeprom_data,
   13724 				    valid_checksum));
   13725 	}
   13726 
   13727 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13728 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13729 		for (i = 0; i < NVM_SIZE; i++) {
   13730 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13731 				printf("XXXX ");
   13732 			else
   13733 				printf("%04hx ", eeprom_data);
   13734 			if (i % 8 == 7)
   13735 				printf("\n");
   13736 		}
   13737 	}
   13738 
   13739 #endif /* WM_DEBUG */
   13740 
   13741 	for (i = 0; i < NVM_SIZE; i++) {
   13742 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13743 			return 1;
   13744 		checksum += eeprom_data;
   13745 	}
   13746 
   13747 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13748 #ifdef WM_DEBUG
   13749 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13750 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13751 #endif
   13752 	}
   13753 
   13754 	return 0;
   13755 }
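
          /*
           * Sketch (illustrative only, not compiled; the helper is
           * hypothetical): the checksum word is whatever value makes the
           * 16-bit sum of all NVM_SIZE words equal NVM_CHECKSUM, so a tool
           * regenerating an image would compute it roughly like this.
           */
          #if 0
          static uint16_t
          wm_nvm_checksum_word(const uint16_t *image)
          {
          	uint16_t sum = 0;
          	int i;

          	/* Sum every word except the checksum word itself. */
          	for (i = 0; i < NVM_SIZE - 1; i++)
          		sum += image[i];

          	return (uint16_t)(NVM_CHECKSUM - sum);
          }
          #endif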
   13756 
   13757 static void
   13758 wm_nvm_version_invm(struct wm_softc *sc)
   13759 {
   13760 	uint32_t dword;
   13761 
   13762 	/*
    13763 	 * Linux's code to decode the version is very strange, so we don't
    13764 	 * follow that algorithm and just use word 61 as the document says.
    13765 	 * Perhaps it's not perfect though...
   13766 	 *
   13767 	 * Example:
   13768 	 *
   13769 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13770 	 */
   13771 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13772 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13773 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13774 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13775 }
   13776 
   13777 static void
   13778 wm_nvm_version(struct wm_softc *sc)
   13779 {
   13780 	uint16_t major, minor, build, patch;
   13781 	uint16_t uid0, uid1;
   13782 	uint16_t nvm_data;
   13783 	uint16_t off;
   13784 	bool check_version = false;
   13785 	bool check_optionrom = false;
   13786 	bool have_build = false;
   13787 	bool have_uid = true;
   13788 
   13789 	/*
   13790 	 * Version format:
   13791 	 *
   13792 	 * XYYZ
   13793 	 * X0YZ
   13794 	 * X0YY
   13795 	 *
   13796 	 * Example:
   13797 	 *
   13798 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13799 	 *	82571	0x50a6	5.10.6?
   13800 	 *	82572	0x506a	5.6.10?
   13801 	 *	82572EI	0x5069	5.6.9?
   13802 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13803 	 *		0x2013	2.1.3?
   13804 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13805 	 * ICH8+82567	0x0040	0.4.0?
   13806 	 * ICH9+82566	0x1040	1.4.0?
   13807 	 *ICH10+82567	0x0043	0.4.3?
   13808 	 *  PCH+82577	0x00c1	0.12.1?
   13809 	 * PCH2+82579	0x00d3	0.13.3?
   13810 	 *		0x00d4	0.13.4?
   13811 	 *  LPT+I218	0x0023	0.2.3?
   13812 	 *  SPT+I219	0x0084	0.8.4?
   13813 	 *  CNP+I219	0x0054	0.5.4?
   13814 	 */
   13815 
   13816 	/*
   13817 	 * XXX
    13818 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    13819 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13820 	 */
   13821 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13822 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13823 		have_uid = false;
   13824 
   13825 	switch (sc->sc_type) {
   13826 	case WM_T_82571:
   13827 	case WM_T_82572:
   13828 	case WM_T_82574:
   13829 	case WM_T_82583:
   13830 		check_version = true;
   13831 		check_optionrom = true;
   13832 		have_build = true;
   13833 		break;
   13834 	case WM_T_ICH8:
   13835 	case WM_T_ICH9:
   13836 	case WM_T_ICH10:
   13837 	case WM_T_PCH:
   13838 	case WM_T_PCH2:
   13839 	case WM_T_PCH_LPT:
   13840 	case WM_T_PCH_SPT:
   13841 	case WM_T_PCH_CNP:
   13842 		check_version = true;
   13843 		have_build = true;
   13844 		have_uid = false;
   13845 		break;
   13846 	case WM_T_82575:
   13847 	case WM_T_82576:
   13848 	case WM_T_82580:
   13849 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13850 			check_version = true;
   13851 		break;
   13852 	case WM_T_I211:
   13853 		wm_nvm_version_invm(sc);
   13854 		have_uid = false;
   13855 		goto printver;
   13856 	case WM_T_I210:
   13857 		if (!wm_nvm_flash_presence_i210(sc)) {
   13858 			wm_nvm_version_invm(sc);
   13859 			have_uid = false;
   13860 			goto printver;
   13861 		}
   13862 		/* FALLTHROUGH */
   13863 	case WM_T_I350:
   13864 	case WM_T_I354:
   13865 		check_version = true;
   13866 		check_optionrom = true;
   13867 		break;
   13868 	default:
   13869 		return;
   13870 	}
   13871 	if (check_version
   13872 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13873 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13874 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13875 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13876 			build = nvm_data & NVM_BUILD_MASK;
   13877 			have_build = true;
   13878 		} else
   13879 			minor = nvm_data & 0x00ff;
   13880 
    13881 		/* The minor field is hex-coded decimal; convert it */
   13882 		minor = (minor / 16) * 10 + (minor % 16);
   13883 		sc->sc_nvm_ver_major = major;
   13884 		sc->sc_nvm_ver_minor = minor;
   13885 
   13886 printver:
   13887 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13888 		    sc->sc_nvm_ver_minor);
   13889 		if (have_build) {
   13890 			sc->sc_nvm_ver_build = build;
   13891 			aprint_verbose(".%d", build);
   13892 		}
   13893 	}
   13894 
    13895 	/* Assume the Option ROM area is above NVM_SIZE */
   13896 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13897 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13898 		/* Option ROM Version */
   13899 		if ((off != 0x0000) && (off != 0xffff)) {
   13900 			int rv;
   13901 
   13902 			off += NVM_COMBO_VER_OFF;
   13903 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13904 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13905 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13906 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13907 				/* 16bits */
   13908 				major = uid0 >> 8;
   13909 				build = (uid0 << 8) | (uid1 >> 8);
   13910 				patch = uid1 & 0x00ff;
   13911 				aprint_verbose(", option ROM Version %d.%d.%d",
   13912 				    major, build, patch);
   13913 			}
   13914 		}
   13915 	}
   13916 
   13917 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13918 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13919 }
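
          /*
           * Worked example (illustrative): an 82571 with version word 0x50a2.
           * Per the XYYZ format above, the major comes from the top nibble
           * (0x5) and, since the build nibble is present, the minor from the
           * middle byte (0x0a) and the build from the bottom nibble (0x2).
           * The hex-coded-decimal conversion maps minor 0x0a to 10, giving
           * "version 5.10.2", matching the table above.
           */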
   13920 
   13921 /*
   13922  * wm_nvm_read:
   13923  *
   13924  *	Read data from the serial EEPROM.
   13925  */
   13926 static int
   13927 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13928 {
   13929 	int rv;
   13930 
   13931 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13932 		device_xname(sc->sc_dev), __func__));
   13933 
   13934 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13935 		return -1;
   13936 
   13937 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13938 
   13939 	return rv;
   13940 }
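
          /*
           * Usage sketch (illustrative only, not compiled; the helper is
           * hypothetical): fetching the three Ethernet-address words through
           * the per-chip back end selected in sc->nvm.read.  NVM words are
           * stored low byte first.
           */
          #if 0
          static int
          wm_nvm_get_ea(struct wm_softc *sc, uint8_t ea[ETHER_ADDR_LEN])
          {
          	uint16_t w[3];
          	int i;

          	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, w) != 0)
          		return -1;
          	for (i = 0; i < 3; i++) {
          		ea[2 * i + 0] = w[i] & 0xff;
          		ea[2 * i + 1] = w[i] >> 8;
          	}
          	return 0;
          }
          #endif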
   13941 
   13942 /*
   13943  * Hardware semaphores.
    13944  * Very complex...
   13945  */
   13946 
   13947 static int
   13948 wm_get_null(struct wm_softc *sc)
   13949 {
   13950 
   13951 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13952 		device_xname(sc->sc_dev), __func__));
   13953 	return 0;
   13954 }
   13955 
   13956 static void
   13957 wm_put_null(struct wm_softc *sc)
   13958 {
   13959 
   13960 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13961 		device_xname(sc->sc_dev), __func__));
   13962 	return;
   13963 }
   13964 
   13965 static int
   13966 wm_get_eecd(struct wm_softc *sc)
   13967 {
   13968 	uint32_t reg;
   13969 	int x;
   13970 
   13971 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13972 		device_xname(sc->sc_dev), __func__));
   13973 
   13974 	reg = CSR_READ(sc, WMREG_EECD);
   13975 
   13976 	/* Request EEPROM access. */
   13977 	reg |= EECD_EE_REQ;
   13978 	CSR_WRITE(sc, WMREG_EECD, reg);
   13979 
   13980 	/* ..and wait for it to be granted. */
   13981 	for (x = 0; x < 1000; x++) {
   13982 		reg = CSR_READ(sc, WMREG_EECD);
   13983 		if (reg & EECD_EE_GNT)
   13984 			break;
   13985 		delay(5);
   13986 	}
   13987 	if ((reg & EECD_EE_GNT) == 0) {
   13988 		aprint_error_dev(sc->sc_dev,
   13989 		    "could not acquire EEPROM GNT\n");
   13990 		reg &= ~EECD_EE_REQ;
   13991 		CSR_WRITE(sc, WMREG_EECD, reg);
   13992 		return -1;
   13993 	}
   13994 
   13995 	return 0;
   13996 }
   13997 
   13998 static void
   13999 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14000 {
   14001 
   14002 	*eecd |= EECD_SK;
   14003 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14004 	CSR_WRITE_FLUSH(sc);
   14005 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14006 		delay(1);
   14007 	else
   14008 		delay(50);
   14009 }
   14010 
   14011 static void
   14012 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14013 {
   14014 
   14015 	*eecd &= ~EECD_SK;
   14016 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14017 	CSR_WRITE_FLUSH(sc);
   14018 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14019 		delay(1);
   14020 	else
   14021 		delay(50);
   14022 }
   14023 
   14024 static void
   14025 wm_put_eecd(struct wm_softc *sc)
   14026 {
   14027 	uint32_t reg;
   14028 
   14029 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14030 		device_xname(sc->sc_dev), __func__));
   14031 
   14032 	/* Stop nvm */
   14033 	reg = CSR_READ(sc, WMREG_EECD);
   14034 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14035 		/* Pull CS high */
   14036 		reg |= EECD_CS;
   14037 		wm_nvm_eec_clock_lower(sc, &reg);
   14038 	} else {
   14039 		/* CS on Microwire is active-high */
   14040 		reg &= ~(EECD_CS | EECD_DI);
   14041 		CSR_WRITE(sc, WMREG_EECD, reg);
   14042 		wm_nvm_eec_clock_raise(sc, &reg);
   14043 		wm_nvm_eec_clock_lower(sc, &reg);
   14044 	}
   14045 
   14046 	reg = CSR_READ(sc, WMREG_EECD);
   14047 	reg &= ~EECD_EE_REQ;
   14048 	CSR_WRITE(sc, WMREG_EECD, reg);
   14049 
   14050 	return;
   14051 }
   14052 
   14053 /*
   14054  * Get hardware semaphore.
   14055  * Same as e1000_get_hw_semaphore_generic()
   14056  */
   14057 static int
   14058 wm_get_swsm_semaphore(struct wm_softc *sc)
   14059 {
   14060 	int32_t timeout;
   14061 	uint32_t swsm;
   14062 
   14063 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14064 		device_xname(sc->sc_dev), __func__));
   14065 	KASSERT(sc->sc_nvm_wordsize > 0);
   14066 
   14067 retry:
   14068 	/* Get the SW semaphore. */
   14069 	timeout = sc->sc_nvm_wordsize + 1;
   14070 	while (timeout) {
   14071 		swsm = CSR_READ(sc, WMREG_SWSM);
   14072 
   14073 		if ((swsm & SWSM_SMBI) == 0)
   14074 			break;
   14075 
   14076 		delay(50);
   14077 		timeout--;
   14078 	}
   14079 
   14080 	if (timeout == 0) {
   14081 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14082 			/*
   14083 			 * In rare circumstances, the SW semaphore may already
   14084 			 * be held unintentionally. Clear the semaphore once
   14085 			 * before giving up.
   14086 			 */
   14087 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14088 			wm_put_swsm_semaphore(sc);
   14089 			goto retry;
   14090 		}
   14091 		aprint_error_dev(sc->sc_dev,
   14092 		    "could not acquire SWSM SMBI\n");
   14093 		return 1;
   14094 	}
   14095 
   14096 	/* Get the FW semaphore. */
   14097 	timeout = sc->sc_nvm_wordsize + 1;
   14098 	while (timeout) {
   14099 		swsm = CSR_READ(sc, WMREG_SWSM);
   14100 		swsm |= SWSM_SWESMBI;
   14101 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14102 		/* If we managed to set the bit we got the semaphore. */
   14103 		swsm = CSR_READ(sc, WMREG_SWSM);
   14104 		if (swsm & SWSM_SWESMBI)
   14105 			break;
   14106 
   14107 		delay(50);
   14108 		timeout--;
   14109 	}
   14110 
   14111 	if (timeout == 0) {
   14112 		aprint_error_dev(sc->sc_dev,
   14113 		    "could not acquire SWSM SWESMBI\n");
   14114 		/* Release semaphores */
   14115 		wm_put_swsm_semaphore(sc);
   14116 		return 1;
   14117 	}
   14118 	return 0;
   14119 }
   14120 
   14121 /*
   14122  * Put hardware semaphore.
   14123  * Same as e1000_put_hw_semaphore_generic()
   14124  */
   14125 static void
   14126 wm_put_swsm_semaphore(struct wm_softc *sc)
   14127 {
   14128 	uint32_t swsm;
   14129 
   14130 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14131 		device_xname(sc->sc_dev), __func__));
   14132 
   14133 	swsm = CSR_READ(sc, WMREG_SWSM);
   14134 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14135 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14136 }
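
          /*
           * Usage pattern (informational): every access to a resource shared
           * with firmware is bracketed by the pair above, e.g.
           *
           *	if (wm_get_swsm_semaphore(sc) != 0)
           *		return -1;
           *	... touch the NVM or PHY ...
           *	wm_put_swsm_semaphore(sc);
           *
           * SMBI arbitrates among software agents; SWESMBI then arbitrates
           * software against firmware.  Both must be held, and the
           * read-back-after-set test above is what detects losing the
           * SWESMBI race.
           */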
   14137 
   14138 /*
   14139  * Get SW/FW semaphore.
   14140  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14141  */
   14142 static int
   14143 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14144 {
   14145 	uint32_t swfw_sync;
   14146 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14147 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14148 	int timeout;
   14149 
   14150 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14151 		device_xname(sc->sc_dev), __func__));
   14152 
   14153 	if (sc->sc_type == WM_T_80003)
   14154 		timeout = 50;
   14155 	else
   14156 		timeout = 200;
   14157 
   14158 	while (timeout) {
   14159 		if (wm_get_swsm_semaphore(sc)) {
   14160 			aprint_error_dev(sc->sc_dev,
   14161 			    "%s: failed to get semaphore\n",
   14162 			    __func__);
   14163 			return 1;
   14164 		}
   14165 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14166 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14167 			swfw_sync |= swmask;
   14168 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14169 			wm_put_swsm_semaphore(sc);
   14170 			return 0;
   14171 		}
   14172 		wm_put_swsm_semaphore(sc);
   14173 		delay(5000);
   14174 		timeout--;
   14175 	}
   14176 	device_printf(sc->sc_dev,
   14177 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14178 	    mask, swfw_sync);
   14179 	return 1;
   14180 }
   14181 
   14182 static void
   14183 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14184 {
   14185 	uint32_t swfw_sync;
   14186 
   14187 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14188 		device_xname(sc->sc_dev), __func__));
   14189 
   14190 	while (wm_get_swsm_semaphore(sc) != 0)
   14191 		continue;
   14192 
   14193 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14194 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14195 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14196 
   14197 	wm_put_swsm_semaphore(sc);
   14198 }
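
          /*
           * Note (informational): SW_FW_SYNC carries one software bit and
           * one firmware bit per shared resource (mask << SWFW_SOFT_SHIFT
           * and mask << SWFW_FIRM_SHIFT).  Acquisition therefore means: take
           * the SWSM semaphore, check that neither bit for the resource is
           * set, set the software bit, and drop SWSM -- which is exactly
           * what wm_get_swfw_semaphore() does, retrying for up to 50 or 200
           * rounds of 5ms.
           */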
   14199 
   14200 static int
   14201 wm_get_nvm_80003(struct wm_softc *sc)
   14202 {
   14203 	int rv;
   14204 
   14205 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14206 		device_xname(sc->sc_dev), __func__));
   14207 
   14208 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14209 		aprint_error_dev(sc->sc_dev,
   14210 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14211 		return rv;
   14212 	}
   14213 
   14214 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14215 	    && (rv = wm_get_eecd(sc)) != 0) {
   14216 		aprint_error_dev(sc->sc_dev,
   14217 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14218 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14219 		return rv;
   14220 	}
   14221 
   14222 	return 0;
   14223 }
   14224 
   14225 static void
   14226 wm_put_nvm_80003(struct wm_softc *sc)
   14227 {
   14228 
   14229 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14230 		device_xname(sc->sc_dev), __func__));
   14231 
   14232 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14233 		wm_put_eecd(sc);
   14234 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14235 }
   14236 
   14237 static int
   14238 wm_get_nvm_82571(struct wm_softc *sc)
   14239 {
   14240 	int rv;
   14241 
   14242 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14243 		device_xname(sc->sc_dev), __func__));
   14244 
   14245 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14246 		return rv;
   14247 
   14248 	switch (sc->sc_type) {
   14249 	case WM_T_82573:
   14250 		break;
   14251 	default:
   14252 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14253 			rv = wm_get_eecd(sc);
   14254 		break;
   14255 	}
   14256 
   14257 	if (rv != 0) {
   14258 		aprint_error_dev(sc->sc_dev,
   14259 		    "%s: failed to get semaphore\n",
   14260 		    __func__);
   14261 		wm_put_swsm_semaphore(sc);
   14262 	}
   14263 
   14264 	return rv;
   14265 }
   14266 
   14267 static void
   14268 wm_put_nvm_82571(struct wm_softc *sc)
   14269 {
   14270 
   14271 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14272 		device_xname(sc->sc_dev), __func__));
   14273 
   14274 	switch (sc->sc_type) {
   14275 	case WM_T_82573:
   14276 		break;
   14277 	default:
   14278 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14279 			wm_put_eecd(sc);
   14280 		break;
   14281 	}
   14282 
   14283 	wm_put_swsm_semaphore(sc);
   14284 }
   14285 
   14286 static int
   14287 wm_get_phy_82575(struct wm_softc *sc)
   14288 {
   14289 
   14290 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14291 		device_xname(sc->sc_dev), __func__));
   14292 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14293 }
   14294 
   14295 static void
   14296 wm_put_phy_82575(struct wm_softc *sc)
   14297 {
   14298 
   14299 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14300 		device_xname(sc->sc_dev), __func__));
   14301 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14302 }
   14303 
   14304 static int
   14305 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14306 {
   14307 	uint32_t ext_ctrl;
   14308 	int timeout = 200;
   14309 
   14310 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14311 		device_xname(sc->sc_dev), __func__));
   14312 
   14313 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14314 	for (timeout = 0; timeout < 200; timeout++) {
   14315 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14316 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14317 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14318 
   14319 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14320 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14321 			return 0;
   14322 		delay(5000);
   14323 	}
   14324 	device_printf(sc->sc_dev,
   14325 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14326 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14327 	return 1;
   14328 }
   14329 
   14330 static void
   14331 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14332 {
   14333 	uint32_t ext_ctrl;
   14334 
   14335 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14336 		device_xname(sc->sc_dev), __func__));
   14337 
   14338 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14339 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14340 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14341 
   14342 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14343 }
   14344 
   14345 static int
   14346 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14347 {
   14348 	uint32_t ext_ctrl;
   14349 	int timeout;
   14350 
   14351 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14352 		device_xname(sc->sc_dev), __func__));
   14353 	mutex_enter(sc->sc_ich_phymtx);
   14354 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14355 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14356 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14357 			break;
   14358 		delay(1000);
   14359 	}
   14360 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14361 		device_printf(sc->sc_dev,
   14362 		    "SW has already locked the resource\n");
   14363 		goto out;
   14364 	}
   14365 
   14366 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14367 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14368 	for (timeout = 0; timeout < 1000; timeout++) {
   14369 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14370 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14371 			break;
   14372 		delay(1000);
   14373 	}
   14374 	if (timeout >= 1000) {
   14375 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14376 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14377 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14378 		goto out;
   14379 	}
   14380 	return 0;
   14381 
   14382 out:
   14383 	mutex_exit(sc->sc_ich_phymtx);
   14384 	return 1;
   14385 }
   14386 
   14387 static void
   14388 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14389 {
   14390 	uint32_t ext_ctrl;
   14391 
   14392 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14393 		device_xname(sc->sc_dev), __func__));
   14394 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14395 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14396 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14397 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14398 	} else {
   14399 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14400 	}
   14401 
   14402 	mutex_exit(sc->sc_ich_phymtx);
   14403 }
   14404 
   14405 static int
   14406 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14407 {
   14408 
   14409 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14410 		device_xname(sc->sc_dev), __func__));
   14411 	mutex_enter(sc->sc_ich_nvmmtx);
   14412 
   14413 	return 0;
   14414 }
   14415 
   14416 static void
   14417 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14418 {
   14419 
   14420 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14421 		device_xname(sc->sc_dev), __func__));
   14422 	mutex_exit(sc->sc_ich_nvmmtx);
   14423 }
   14424 
   14425 static int
   14426 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14427 {
   14428 	int i = 0;
   14429 	uint32_t reg;
   14430 
   14431 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14432 		device_xname(sc->sc_dev), __func__));
   14433 
   14434 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14435 	do {
   14436 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14437 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14438 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14439 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14440 			break;
   14441 		delay(2*1000);
   14442 		i++;
   14443 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14444 
   14445 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14446 		wm_put_hw_semaphore_82573(sc);
   14447 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14448 		    device_xname(sc->sc_dev));
   14449 		return -1;
   14450 	}
   14451 
   14452 	return 0;
   14453 }
   14454 
   14455 static void
   14456 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14457 {
   14458 	uint32_t reg;
   14459 
   14460 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14461 		device_xname(sc->sc_dev), __func__));
   14462 
   14463 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14464 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14465 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14466 }
   14467 
   14468 /*
   14469  * Management mode and power management related subroutines.
   14470  * BMC, AMT, suspend/resume and EEE.
   14471  */
   14472 
   14473 #ifdef WM_WOL
   14474 static int
   14475 wm_check_mng_mode(struct wm_softc *sc)
   14476 {
   14477 	int rv;
   14478 
   14479 	switch (sc->sc_type) {
   14480 	case WM_T_ICH8:
   14481 	case WM_T_ICH9:
   14482 	case WM_T_ICH10:
   14483 	case WM_T_PCH:
   14484 	case WM_T_PCH2:
   14485 	case WM_T_PCH_LPT:
   14486 	case WM_T_PCH_SPT:
   14487 	case WM_T_PCH_CNP:
   14488 		rv = wm_check_mng_mode_ich8lan(sc);
   14489 		break;
   14490 	case WM_T_82574:
   14491 	case WM_T_82583:
   14492 		rv = wm_check_mng_mode_82574(sc);
   14493 		break;
   14494 	case WM_T_82571:
   14495 	case WM_T_82572:
   14496 	case WM_T_82573:
   14497 	case WM_T_80003:
   14498 		rv = wm_check_mng_mode_generic(sc);
   14499 		break;
   14500 	default:
    14501 		/* Nothing to do */
   14502 		rv = 0;
   14503 		break;
   14504 	}
   14505 
   14506 	return rv;
   14507 }
   14508 
   14509 static int
   14510 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14511 {
   14512 	uint32_t fwsm;
   14513 
   14514 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14515 
   14516 	if (((fwsm & FWSM_FW_VALID) != 0)
   14517 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14518 		return 1;
   14519 
   14520 	return 0;
   14521 }
   14522 
   14523 static int
   14524 wm_check_mng_mode_82574(struct wm_softc *sc)
   14525 {
   14526 	uint16_t data;
   14527 
   14528 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14529 
   14530 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14531 		return 1;
   14532 
   14533 	return 0;
   14534 }
   14535 
   14536 static int
   14537 wm_check_mng_mode_generic(struct wm_softc *sc)
   14538 {
   14539 	uint32_t fwsm;
   14540 
   14541 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14542 
   14543 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14544 		return 1;
   14545 
   14546 	return 0;
   14547 }
   14548 #endif /* WM_WOL */
   14549 
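/*
 * Decide whether management pass-through should be enabled.  ASF firmware
 * must be present and the TCO receive path must be on; then either the ARC
 * subsystem (FWSM/FACTPS), the 82574/82583 NVM CFG2 word, or the MANC
 * SMBus/ASF bits give the answer.
 */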
   14550 static int
   14551 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14552 {
   14553 	uint32_t manc, fwsm, factps;
   14554 
   14555 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14556 		return 0;
   14557 
   14558 	manc = CSR_READ(sc, WMREG_MANC);
   14559 
   14560 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14561 		device_xname(sc->sc_dev), manc));
   14562 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14563 		return 0;
   14564 
   14565 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14566 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14567 		factps = CSR_READ(sc, WMREG_FACTPS);
   14568 		if (((factps & FACTPS_MNGCG) == 0)
   14569 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14570 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14572 		uint16_t data;
   14573 
   14574 		factps = CSR_READ(sc, WMREG_FACTPS);
   14575 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14576 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14577 			device_xname(sc->sc_dev), factps, data));
   14578 		if (((factps & FACTPS_MNGCG) == 0)
   14579 		    && ((data & NVM_CFG2_MNGM_MASK)
   14580 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14581 			return 1;
   14582 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14583 	    && ((manc & MANC_ASF_EN) == 0))
   14584 		return 1;
   14585 
   14586 	return 0;
   14587 }
   14588 
   14589 static bool
   14590 wm_phy_resetisblocked(struct wm_softc *sc)
   14591 {
   14592 	bool blocked = false;
   14593 	uint32_t reg;
   14594 	int i = 0;
   14595 
   14596 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14597 		device_xname(sc->sc_dev), __func__));
   14598 
   14599 	switch (sc->sc_type) {
   14600 	case WM_T_ICH8:
   14601 	case WM_T_ICH9:
   14602 	case WM_T_ICH10:
   14603 	case WM_T_PCH:
   14604 	case WM_T_PCH2:
   14605 	case WM_T_PCH_LPT:
   14606 	case WM_T_PCH_SPT:
   14607 	case WM_T_PCH_CNP:
   14608 		do {
   14609 			reg = CSR_READ(sc, WMREG_FWSM);
   14610 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14611 				blocked = true;
   14612 				delay(10*1000);
   14613 				continue;
   14614 			}
   14615 			blocked = false;
   14616 		} while (blocked && (i++ < 30));
   14617 		return blocked;
   14618 		break;
   14619 	case WM_T_82571:
   14620 	case WM_T_82572:
   14621 	case WM_T_82573:
   14622 	case WM_T_82574:
   14623 	case WM_T_82583:
   14624 	case WM_T_80003:
   14625 		reg = CSR_READ(sc, WMREG_MANC);
   14626 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14627 			return true;
   14628 		else
   14629 			return false;
   14630 		break;
   14631 	default:
   14632 		/* No problem */
   14633 		break;
   14634 	}
   14635 
   14636 	return false;
   14637 }
   14638 
   14639 static void
   14640 wm_get_hw_control(struct wm_softc *sc)
   14641 {
   14642 	uint32_t reg;
   14643 
   14644 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14645 		device_xname(sc->sc_dev), __func__));
   14646 
   14647 	if (sc->sc_type == WM_T_82573) {
   14648 		reg = CSR_READ(sc, WMREG_SWSM);
   14649 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14650 	} else if (sc->sc_type >= WM_T_82571) {
   14651 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14652 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14653 	}
   14654 }
   14655 
   14656 static void
   14657 wm_release_hw_control(struct wm_softc *sc)
   14658 {
   14659 	uint32_t reg;
   14660 
   14661 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14662 		device_xname(sc->sc_dev), __func__));
   14663 
   14664 	if (sc->sc_type == WM_T_82573) {
   14665 		reg = CSR_READ(sc, WMREG_SWSM);
   14666 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14667 	} else if (sc->sc_type >= WM_T_82571) {
   14668 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14669 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14670 	}
   14671 }
   14672 
   14673 static void
   14674 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14675 {
   14676 	uint32_t reg;
   14677 
   14678 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14679 		device_xname(sc->sc_dev), __func__));
   14680 
   14681 	if (sc->sc_type < WM_T_PCH2)
   14682 		return;
   14683 
   14684 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14685 
   14686 	if (gate)
   14687 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14688 	else
   14689 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14690 
   14691 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14692 }
   14693 
   14694 static int
   14695 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14696 {
   14697 	uint32_t fwsm, reg;
   14698 	int rv = 0;
   14699 
   14700 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14701 		device_xname(sc->sc_dev), __func__));
   14702 
   14703 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14704 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14705 
   14706 	/* Disable ULP */
   14707 	wm_ulp_disable(sc);
   14708 
   14709 	/* Acquire PHY semaphore */
   14710 	rv = sc->phy.acquire(sc);
   14711 	if (rv != 0) {
		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
			device_xname(sc->sc_dev), __func__));
   14714 		return -1;
   14715 	}
   14716 
   14717 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14718 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14719 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14720 	 */
   14721 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14722 	switch (sc->sc_type) {
   14723 	case WM_T_PCH_LPT:
   14724 	case WM_T_PCH_SPT:
   14725 	case WM_T_PCH_CNP:
   14726 		if (wm_phy_is_accessible_pchlan(sc))
   14727 			break;
   14728 
   14729 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14730 		 * forcing MAC to SMBus mode first.
   14731 		 */
   14732 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14733 		reg |= CTRL_EXT_FORCE_SMBUS;
   14734 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14735 #if 0
   14736 		/* XXX Isn't this required??? */
   14737 		CSR_WRITE_FLUSH(sc);
   14738 #endif
   14739 		/* Wait 50 milliseconds for MAC to finish any retries
   14740 		 * that it might be trying to perform from previous
   14741 		 * attempts to acknowledge any phy read requests.
   14742 		 */
   14743 		delay(50 * 1000);
   14744 		/* FALLTHROUGH */
   14745 	case WM_T_PCH2:
   14746 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14747 			break;
   14748 		/* FALLTHROUGH */
   14749 	case WM_T_PCH:
   14750 		if (sc->sc_type == WM_T_PCH)
   14751 			if ((fwsm & FWSM_FW_VALID) != 0)
   14752 				break;
   14753 
   14754 		if (wm_phy_resetisblocked(sc) == true) {
   14755 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14756 			break;
   14757 		}
   14758 
   14759 		/* Toggle LANPHYPC Value bit */
   14760 		wm_toggle_lanphypc_pch_lpt(sc);
   14761 
   14762 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14763 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14764 				break;
   14765 
   14766 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14767 			 * so ensure that the MAC is also out of SMBus mode
   14768 			 */
   14769 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14770 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14771 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14772 
   14773 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14774 				break;
   14775 			rv = -1;
   14776 		}
   14777 		break;
   14778 	default:
   14779 		break;
   14780 	}
   14781 
   14782 	/* Release semaphore */
   14783 	sc->phy.release(sc);
   14784 
   14785 	if (rv == 0) {
   14786 		/* Check to see if able to reset PHY.  Print error if not */
   14787 		if (wm_phy_resetisblocked(sc)) {
   14788 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14789 			goto out;
   14790 		}
   14791 
   14792 		/* Reset the PHY before any access to it.  Doing so, ensures
   14793 		 * that the PHY is in a known good state before we read/write
   14794 		 * PHY registers.  The generic reset is sufficient here,
   14795 		 * because we haven't determined the PHY type yet.
   14796 		 */
   14797 		if (wm_reset_phy(sc) != 0)
   14798 			goto out;
   14799 
		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce,
		 * print an error, as this (E1000E_BLK_PHY_RESET in the Linux
		 * e1000e driver) is the condition the PHY is in.
		 */
   14806 		if (wm_phy_resetisblocked(sc))
   14807 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14808 	}
   14809 
   14810 out:
   14811 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14812 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14813 		delay(10*1000);
   14814 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14815 	}
   14816 
	return rv;
   14818 }
   14819 
   14820 static void
   14821 wm_init_manageability(struct wm_softc *sc)
   14822 {
   14823 
   14824 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14825 		device_xname(sc->sc_dev), __func__));
   14826 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14827 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14828 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14829 
   14830 		/* Disable hardware interception of ARP */
   14831 		manc &= ~MANC_ARP_EN;
   14832 
   14833 		/* Enable receiving management packets to the host */
   14834 		if (sc->sc_type >= WM_T_82571) {
   14835 			manc |= MANC_EN_MNG2HOST;
   14836 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14837 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14838 		}
   14839 
   14840 		CSR_WRITE(sc, WMREG_MANC, manc);
   14841 	}
   14842 }
   14843 
   14844 static void
   14845 wm_release_manageability(struct wm_softc *sc)
   14846 {
   14847 
   14848 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14849 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14850 
   14851 		manc |= MANC_ARP_EN;
   14852 		if (sc->sc_type >= WM_T_82571)
   14853 			manc &= ~MANC_EN_MNG2HOST;
   14854 
   14855 		CSR_WRITE(sc, WMREG_MANC, manc);
   14856 	}
   14857 }
   14858 
   14859 static void
   14860 wm_get_wakeup(struct wm_softc *sc)
   14861 {
   14862 
   14863 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14864 	switch (sc->sc_type) {
   14865 	case WM_T_82573:
   14866 	case WM_T_82583:
   14867 		sc->sc_flags |= WM_F_HAS_AMT;
   14868 		/* FALLTHROUGH */
   14869 	case WM_T_80003:
   14870 	case WM_T_82575:
   14871 	case WM_T_82576:
   14872 	case WM_T_82580:
   14873 	case WM_T_I350:
   14874 	case WM_T_I354:
   14875 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14876 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14877 		/* FALLTHROUGH */
   14878 	case WM_T_82541:
   14879 	case WM_T_82541_2:
   14880 	case WM_T_82547:
   14881 	case WM_T_82547_2:
   14882 	case WM_T_82571:
   14883 	case WM_T_82572:
   14884 	case WM_T_82574:
   14885 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14886 		break;
   14887 	case WM_T_ICH8:
   14888 	case WM_T_ICH9:
   14889 	case WM_T_ICH10:
   14890 	case WM_T_PCH:
   14891 	case WM_T_PCH2:
   14892 	case WM_T_PCH_LPT:
   14893 	case WM_T_PCH_SPT:
   14894 	case WM_T_PCH_CNP:
   14895 		sc->sc_flags |= WM_F_HAS_AMT;
   14896 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14897 		break;
   14898 	default:
   14899 		break;
   14900 	}
   14901 
   14902 	/* 1: HAS_MANAGE */
   14903 	if (wm_enable_mng_pass_thru(sc) != 0)
   14904 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14905 
	/*
	 * Note that the WOL flag is set after the EEPROM stuff has been
	 * reset.
	 */
   14910 }
   14911 
   14912 /*
   14913  * Unconfigure Ultra Low Power mode.
   14914  * Only for I217 and newer (see below).
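 * If valid ME firmware is present, ULP exit is requested through the H2ME
 * register; otherwise the host itself unforces SMBus mode and clears the
 * ULP configuration in the PHY over MDIO.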
   14915  */
   14916 static int
   14917 wm_ulp_disable(struct wm_softc *sc)
   14918 {
   14919 	uint32_t reg;
   14920 	uint16_t phyreg;
   14921 	int i = 0, rv = 0;
   14922 
   14923 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14924 		device_xname(sc->sc_dev), __func__));
   14925 	/* Exclude old devices */
   14926 	if ((sc->sc_type < WM_T_PCH_LPT)
   14927 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14928 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14929 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14930 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14931 		return 0;
   14932 
   14933 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14934 		/* Request ME un-configure ULP mode in the PHY */
   14935 		reg = CSR_READ(sc, WMREG_H2ME);
   14936 		reg &= ~H2ME_ULP;
   14937 		reg |= H2ME_ENFORCE_SETTINGS;
   14938 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14939 
   14940 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14941 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14942 			if (i++ == 30) {
   14943 				device_printf(sc->sc_dev, "%s timed out\n",
   14944 				    __func__);
   14945 				return -1;
   14946 			}
   14947 			delay(10 * 1000);
   14948 		}
   14949 		reg = CSR_READ(sc, WMREG_H2ME);
   14950 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14951 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14952 
   14953 		return 0;
   14954 	}
   14955 
   14956 	/* Acquire semaphore */
   14957 	rv = sc->phy.acquire(sc);
   14958 	if (rv != 0) {
		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
			device_xname(sc->sc_dev), __func__));
   14961 		return -1;
   14962 	}
   14963 
   14964 	/* Toggle LANPHYPC */
   14965 	wm_toggle_lanphypc_pch_lpt(sc);
   14966 
   14967 	/* Unforce SMBus mode in PHY */
   14968 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14969 	if (rv != 0) {
   14970 		uint32_t reg2;
   14971 
   14972 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14973 			__func__);
   14974 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14975 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14976 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14977 		delay(50 * 1000);
   14978 
   14979 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14980 		    &phyreg);
   14981 		if (rv != 0)
   14982 			goto release;
   14983 	}
   14984 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14985 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14986 
   14987 	/* Unforce SMBus mode in MAC */
   14988 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14989 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14990 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14991 
   14992 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14993 	if (rv != 0)
   14994 		goto release;
   14995 	phyreg |= HV_PM_CTRL_K1_ENA;
   14996 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14997 
   14998 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14999 		&phyreg);
   15000 	if (rv != 0)
   15001 		goto release;
   15002 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15003 	    | I218_ULP_CONFIG1_STICKY_ULP
   15004 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15005 	    | I218_ULP_CONFIG1_WOL_HOST
   15006 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15007 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15008 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15009 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15010 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15011 	phyreg |= I218_ULP_CONFIG1_START;
   15012 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15013 
   15014 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15015 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15016 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15017 
   15018 release:
   15019 	/* Release semaphore */
   15020 	sc->phy.release(sc);
   15021 	wm_gmii_reset(sc);
   15022 	delay(50 * 1000);
   15023 
   15024 	return rv;
   15025 }
   15026 
   15027 /* WOL in the newer chipset interfaces (pchlan) */
   15028 static int
   15029 wm_enable_phy_wakeup(struct wm_softc *sc)
   15030 {
   15031 	device_t dev = sc->sc_dev;
   15032 	uint32_t mreg, moff;
   15033 	uint16_t wuce, wuc, wufc, preg;
   15034 	int i, rv;
   15035 
   15036 	KASSERT(sc->sc_type >= WM_T_PCH);
   15037 
   15038 	/* Copy MAC RARs to PHY RARs */
   15039 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15040 
   15041 	/* Activate PHY wakeup */
   15042 	rv = sc->phy.acquire(sc);
   15043 	if (rv != 0) {
   15044 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15045 		    __func__);
   15046 		return rv;
   15047 	}
   15048 
   15049 	/*
   15050 	 * Enable access to PHY wakeup registers.
   15051 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15052 	 */
   15053 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15054 	if (rv != 0) {
   15055 		device_printf(dev,
   15056 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15057 		goto release;
   15058 	}
   15059 
   15060 	/* Copy MAC MTA to PHY MTA */
   15061 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15062 		uint16_t lo, hi;
   15063 
   15064 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15065 		lo = (uint16_t)(mreg & 0xffff);
   15066 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15067 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15068 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15069 	}
   15070 
   15071 	/* Configure PHY Rx Control register */
   15072 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15073 	mreg = CSR_READ(sc, WMREG_RCTL);
   15074 	if (mreg & RCTL_UPE)
   15075 		preg |= BM_RCTL_UPE;
   15076 	if (mreg & RCTL_MPE)
   15077 		preg |= BM_RCTL_MPE;
   15078 	preg &= ~(BM_RCTL_MO_MASK);
   15079 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15080 	if (moff != 0)
   15081 		preg |= moff << BM_RCTL_MO_SHIFT;
   15082 	if (mreg & RCTL_BAM)
   15083 		preg |= BM_RCTL_BAM;
   15084 	if (mreg & RCTL_PMCF)
   15085 		preg |= BM_RCTL_PMCF;
   15086 	mreg = CSR_READ(sc, WMREG_CTRL);
   15087 	if (mreg & CTRL_RFCE)
   15088 		preg |= BM_RCTL_RFCE;
   15089 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15090 
   15091 	wuc = WUC_APME | WUC_PME_EN;
   15092 	wufc = WUFC_MAG;
   15093 	/* Enable PHY wakeup in MAC register */
   15094 	CSR_WRITE(sc, WMREG_WUC,
   15095 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15096 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15097 
   15098 	/* Configure and enable PHY wakeup in PHY registers */
   15099 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15100 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15101 
   15102 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15103 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15104 
   15105 release:
   15106 	sc->phy.release(sc);
   15107 
	return rv;
   15109 }
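
/*
 * Illustrative only: the BM wakeup registers (BM_WUC_PAGE) are reachable
 * solely through the enable/access/disable sequence used above.  A minimal
 * sketch of the pattern; "BM_REG" and "SOME_BIT" are placeholders, and the
 * fourth argument of wm_access_phy_wakeup_reg_bm() appears to select
 * read (1) vs. write (0):
 */
#if 0
	uint16_t wuce, val;

	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
	wm_access_phy_wakeup_reg_bm(dev, BM_REG, &val, 1, true); /* Read */
	val |= SOME_BIT;
	wm_access_phy_wakeup_reg_bm(dev, BM_REG, &val, 0, true); /* Write */
	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
#endif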
   15110 
   15111 /* Power down workaround on D3 */
   15112 static void
   15113 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15114 {
   15115 	uint32_t reg;
   15116 	uint16_t phyreg;
   15117 	int i;
   15118 
   15119 	for (i = 0; i < 2; i++) {
   15120 		/* Disable link */
   15121 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15122 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15123 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15124 
   15125 		/*
   15126 		 * Call gig speed drop workaround on Gig disable before
   15127 		 * accessing any PHY registers
   15128 		 */
   15129 		if (sc->sc_type == WM_T_ICH8)
   15130 			wm_gig_downshift_workaround_ich8lan(sc);
   15131 
   15132 		/* Write VR power-down enable */
   15133 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15134 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15135 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15136 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15137 
   15138 		/* Read it back and test */
   15139 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15140 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15141 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15142 			break;
   15143 
   15144 		/* Issue PHY reset and repeat at most one more time */
   15145 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15146 	}
   15147 }
   15148 
   15149 /*
   15150  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15151  *  @sc: pointer to the HW structure
   15152  *
   15153  *  During S0 to Sx transition, it is possible the link remains at gig
   15154  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15155  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15156  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15157  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15158  *  needs to be written.
   15159  *  Parts that support (and are linked to a partner which support) EEE in
   15160  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15161  *  than 10Mbps w/o EEE.
   15162  */
   15163 static void
   15164 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15165 {
   15166 	device_t dev = sc->sc_dev;
   15167 	struct ethercom *ec = &sc->sc_ethercom;
   15168 	uint32_t phy_ctrl;
   15169 	int rv;
   15170 
   15171 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15172 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15173 
   15174 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15175 
   15176 	if (sc->sc_phytype == WMPHY_I217) {
   15177 		uint16_t devid = sc->sc_pcidevid;
   15178 
   15179 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15180 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15181 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15182 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15183 		    (sc->sc_type >= WM_T_PCH_SPT))
   15184 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15185 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15186 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15187 
   15188 		if (sc->phy.acquire(sc) != 0)
   15189 			goto out;
   15190 
   15191 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15192 			uint16_t eee_advert;
   15193 
   15194 			rv = wm_read_emi_reg_locked(dev,
   15195 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15196 			if (rv)
   15197 				goto release;
   15198 
   15199 			/*
   15200 			 * Disable LPLU if both link partners support 100BaseT
   15201 			 * EEE and 100Full is advertised on both ends of the
   15202 			 * link, and enable Auto Enable LPI since there will
   15203 			 * be no driver to enable LPI while in Sx.
   15204 			 */
   15205 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15206 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15207 				uint16_t anar, phy_reg;
   15208 
   15209 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15210 				    &anar);
   15211 				if (anar & ANAR_TX_FD) {
   15212 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15213 					    PHY_CTRL_NOND0A_LPLU);
   15214 
   15215 					/* Set Auto Enable LPI after link up */
   15216 					sc->phy.readreg_locked(dev, 2,
   15217 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15218 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15219 					sc->phy.writereg_locked(dev, 2,
   15220 					    I217_LPI_GPIO_CTRL, phy_reg);
   15221 				}
   15222 			}
   15223 		}
   15224 
   15225 		/*
   15226 		 * For i217 Intel Rapid Start Technology support,
   15227 		 * when the system is going into Sx and no manageability engine
   15228 		 * is present, the driver must configure proxy to reset only on
   15229 		 * power good.	LPI (Low Power Idle) state must also reset only
   15230 		 * on power good, as well as the MTA (Multicast table array).
   15231 		 * The SMBus release must also be disabled on LCD reset.
   15232 		 */
   15233 
   15234 		/*
   15235 		 * Enable MTA to reset for Intel Rapid Start Technology
   15236 		 * Support
   15237 		 */
   15238 
   15239 release:
   15240 		sc->phy.release(sc);
   15241 	}
   15242 out:
   15243 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15244 
   15245 	if (sc->sc_type == WM_T_ICH8)
   15246 		wm_gig_downshift_workaround_ich8lan(sc);
   15247 
   15248 	if (sc->sc_type >= WM_T_PCH) {
   15249 		wm_oem_bits_config_ich8lan(sc, false);
   15250 
   15251 		/* Reset PHY to activate OEM bits on 82577/8 */
   15252 		if (sc->sc_type == WM_T_PCH)
   15253 			wm_reset_phy(sc);
   15254 
   15255 		if (sc->phy.acquire(sc) != 0)
   15256 			return;
   15257 		wm_write_smbus_addr(sc);
   15258 		sc->phy.release(sc);
   15259 	}
   15260 }
   15261 
   15262 /*
   15263  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15264  *  @sc: pointer to the HW structure
   15265  *
   15266  *  During Sx to S0 transitions on non-managed devices or managed devices
   15267  *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   15269  *  the PHY.
   15270  *  On i217, setup Intel Rapid Start Technology.
   15271  */
   15272 static int
   15273 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15274 {
   15275 	device_t dev = sc->sc_dev;
   15276 	int rv;
   15277 
   15278 	if (sc->sc_type < WM_T_PCH2)
   15279 		return 0;
   15280 
   15281 	rv = wm_init_phy_workarounds_pchlan(sc);
   15282 	if (rv != 0)
   15283 		return -1;
   15284 
	/* For i217 Intel Rapid Start Technology support: when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
   15290 	if (sc->sc_phytype == WMPHY_I217) {
   15291 		uint16_t phy_reg;
   15292 
   15293 		if (sc->phy.acquire(sc) != 0)
   15294 			return -1;
   15295 
   15296 		/* Clear Auto Enable LPI after link up */
   15297 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15298 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15299 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15300 
   15301 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15302 			/* Restore clear on SMB if no manageability engine
   15303 			 * is present
   15304 			 */
   15305 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15306 			    &phy_reg);
   15307 			if (rv != 0)
   15308 				goto release;
   15309 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15310 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15311 
   15312 			/* Disable Proxy */
   15313 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15314 		}
   15315 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15317 		if (rv != 0)
   15318 			goto release;
   15319 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15320 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15321 
   15322 release:
   15323 		sc->phy.release(sc);
   15324 		return rv;
   15325 	}
   15326 
   15327 	return 0;
   15328 }
   15329 
   15330 static void
   15331 wm_enable_wakeup(struct wm_softc *sc)
   15332 {
   15333 	uint32_t reg, pmreg;
   15334 	pcireg_t pmode;
   15335 	int rv = 0;
   15336 
   15337 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15338 		device_xname(sc->sc_dev), __func__));
   15339 
   15340 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15341 	    &pmreg, NULL) == 0)
   15342 		return;
   15343 
   15344 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15345 		goto pme;
   15346 
   15347 	/* Advertise the wakeup capability */
   15348 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15349 	    | CTRL_SWDPIN(3));
   15350 
   15351 	/* Keep the laser running on fiber adapters */
   15352 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15353 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15354 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15355 		reg |= CTRL_EXT_SWDPIN(3);
   15356 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15357 	}
   15358 
   15359 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15360 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15361 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15362 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15363 		wm_suspend_workarounds_ich8lan(sc);
   15364 
   15365 #if 0	/* For the multicast packet */
   15366 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15367 	reg |= WUFC_MC;
   15368 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15369 #endif
   15370 
   15371 	if (sc->sc_type >= WM_T_PCH) {
   15372 		rv = wm_enable_phy_wakeup(sc);
   15373 		if (rv != 0)
   15374 			goto pme;
   15375 	} else {
   15376 		/* Enable wakeup by the MAC */
   15377 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15378 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15379 	}
   15380 
   15381 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15382 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15383 		|| (sc->sc_type == WM_T_PCH2))
   15384 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15385 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15386 
   15387 pme:
   15388 	/* Request PME */
   15389 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15390 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15391 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15392 		/* For WOL */
   15393 		pmode |= PCI_PMCSR_PME_EN;
   15394 	} else {
   15395 		/* Disable WOL */
   15396 		pmode &= ~PCI_PMCSR_PME_EN;
   15397 	}
   15398 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15399 }
   15400 
   15401 /* Disable ASPM L0s and/or L1 for workaround */
   15402 static void
   15403 wm_disable_aspm(struct wm_softc *sc)
   15404 {
   15405 	pcireg_t reg, mask = 0;
	const char *str = "";
   15407 
	/*
	 * Only for PCIe devices which have the PCIe capability in the PCI
	 * config space.
	 */
   15412 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15413 		return;
   15414 
   15415 	switch (sc->sc_type) {
   15416 	case WM_T_82571:
   15417 	case WM_T_82572:
   15418 		/*
   15419 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15420 		 * State Power management L1 State (ASPM L1).
   15421 		 */
   15422 		mask = PCIE_LCSR_ASPM_L1;
   15423 		str = "L1 is";
   15424 		break;
   15425 	case WM_T_82573:
   15426 	case WM_T_82574:
   15427 	case WM_T_82583:
   15428 		/*
   15429 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15430 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The documents for the 82574 and 82583 say
		 * that disabling L0s on those specific chipsets is
		 * sufficient, but we follow what the Intel em driver does.
   15435 		 *
   15436 		 * References:
   15437 		 * Errata 8 of the Specification Update of i82573.
   15438 		 * Errata 20 of the Specification Update of i82574.
   15439 		 * Errata 9 of the Specification Update of i82583.
   15440 		 */
   15441 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15442 		str = "L0s and L1 are";
   15443 		break;
   15444 	default:
   15445 		return;
   15446 	}
   15447 
   15448 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15449 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15450 	reg &= ~mask;
   15451 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15452 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15453 
   15454 	/* Print only in wm_attach() */
   15455 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15456 		aprint_verbose_dev(sc->sc_dev,
   15457 		    "ASPM %s disabled to workaround the errata.\n", str);
   15458 }
   15459 
   15460 /* LPLU */
   15461 
   15462 static void
   15463 wm_lplu_d0_disable(struct wm_softc *sc)
   15464 {
   15465 	struct mii_data *mii = &sc->sc_mii;
   15466 	uint32_t reg;
   15467 	uint16_t phyval;
   15468 
   15469 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15470 		device_xname(sc->sc_dev), __func__));
   15471 
   15472 	if (sc->sc_phytype == WMPHY_IFE)
   15473 		return;
   15474 
   15475 	switch (sc->sc_type) {
   15476 	case WM_T_82571:
   15477 	case WM_T_82572:
   15478 	case WM_T_82573:
   15479 	case WM_T_82575:
   15480 	case WM_T_82576:
   15481 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15482 		phyval &= ~PMR_D0_LPLU;
   15483 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15484 		break;
   15485 	case WM_T_82580:
   15486 	case WM_T_I350:
   15487 	case WM_T_I210:
   15488 	case WM_T_I211:
   15489 		reg = CSR_READ(sc, WMREG_PHPM);
   15490 		reg &= ~PHPM_D0A_LPLU;
   15491 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15492 		break;
   15493 	case WM_T_82574:
   15494 	case WM_T_82583:
   15495 	case WM_T_ICH8:
   15496 	case WM_T_ICH9:
   15497 	case WM_T_ICH10:
   15498 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15499 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15500 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15501 		CSR_WRITE_FLUSH(sc);
   15502 		break;
   15503 	case WM_T_PCH:
   15504 	case WM_T_PCH2:
   15505 	case WM_T_PCH_LPT:
   15506 	case WM_T_PCH_SPT:
   15507 	case WM_T_PCH_CNP:
   15508 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15509 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15510 		if (wm_phy_resetisblocked(sc) == false)
   15511 			phyval |= HV_OEM_BITS_ANEGNOW;
   15512 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15513 		break;
   15514 	default:
   15515 		break;
   15516 	}
   15517 }
   15518 
   15519 /* EEE */
   15520 
   15521 static int
   15522 wm_set_eee_i350(struct wm_softc *sc)
   15523 {
   15524 	struct ethercom *ec = &sc->sc_ethercom;
   15525 	uint32_t ipcnfg, eeer;
   15526 	uint32_t ipcnfg_mask
   15527 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15528 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15529 
   15530 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15531 
   15532 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15533 	eeer = CSR_READ(sc, WMREG_EEER);
   15534 
   15535 	/* Enable or disable per user setting */
   15536 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15537 		ipcnfg |= ipcnfg_mask;
   15538 		eeer |= eeer_mask;
   15539 	} else {
   15540 		ipcnfg &= ~ipcnfg_mask;
   15541 		eeer &= ~eeer_mask;
   15542 	}
   15543 
   15544 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15545 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15546 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15547 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15548 
   15549 	return 0;
   15550 }
   15551 
   15552 static int
   15553 wm_set_eee_pchlan(struct wm_softc *sc)
   15554 {
   15555 	device_t dev = sc->sc_dev;
   15556 	struct ethercom *ec = &sc->sc_ethercom;
   15557 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15558 	int rv = 0;
   15559 
   15560 	switch (sc->sc_phytype) {
   15561 	case WMPHY_82579:
   15562 		lpa = I82579_EEE_LP_ABILITY;
   15563 		pcs_status = I82579_EEE_PCS_STATUS;
   15564 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15565 		break;
   15566 	case WMPHY_I217:
   15567 		lpa = I217_EEE_LP_ABILITY;
   15568 		pcs_status = I217_EEE_PCS_STATUS;
   15569 		adv_addr = I217_EEE_ADVERTISEMENT;
   15570 		break;
   15571 	default:
   15572 		return 0;
   15573 	}
   15574 
   15575 	if (sc->phy.acquire(sc)) {
   15576 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15577 		return 0;
   15578 	}
   15579 
   15580 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15581 	if (rv != 0)
   15582 		goto release;
   15583 
   15584 	/* Clear bits that enable EEE in various speeds */
   15585 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15586 
   15587 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15588 		/* Save off link partner's EEE ability */
   15589 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15590 		if (rv != 0)
   15591 			goto release;
   15592 
   15593 		/* Read EEE advertisement */
   15594 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15595 			goto release;
   15596 
   15597 		/*
   15598 		 * Enable EEE only for speeds in which the link partner is
   15599 		 * EEE capable and for which we advertise EEE.
   15600 		 */
   15601 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15602 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15603 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15604 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15605 			if ((data & ANLPAR_TX_FD) != 0)
   15606 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15607 			else {
   15608 				/*
   15609 				 * EEE is not supported in 100Half, so ignore
				 * the partner's 100Mbps EEE ability if full-duplex
   15611 				 * is not advertised.
   15612 				 */
   15613 				sc->eee_lp_ability
   15614 				    &= ~AN_EEEADVERT_100_TX;
   15615 			}
   15616 		}
   15617 	}
   15618 
   15619 	if (sc->sc_phytype == WMPHY_82579) {
   15620 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15621 		if (rv != 0)
   15622 			goto release;
   15623 
   15624 		data &= ~I82579_LPI_PLL_SHUT_100;
   15625 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15626 	}
   15627 
   15628 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15629 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15630 		goto release;
   15631 
   15632 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15633 release:
   15634 	sc->phy.release(sc);
   15635 
   15636 	return rv;
   15637 }
   15638 
   15639 static int
   15640 wm_set_eee(struct wm_softc *sc)
   15641 {
   15642 	struct ethercom *ec = &sc->sc_ethercom;
   15643 
   15644 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15645 		return 0;
   15646 
   15647 	if (sc->sc_type == WM_T_I354) {
   15648 		/* I354 uses an external PHY */
   15649 		return 0; /* not yet */
   15650 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15651 		return wm_set_eee_i350(sc);
   15652 	else if (sc->sc_type >= WM_T_PCH2)
   15653 		return wm_set_eee_pchlan(sc);
   15654 
   15655 	return 0;
   15656 }
   15657 
   15658 /*
   15659  * Workarounds (mainly PHY related).
   15660  * Basically, PHY's workarounds are in the PHY drivers.
   15661  */
   15662 
   15663 /* Work-around for 82566 Kumeran PCS lock loss */
   15664 static int
   15665 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15666 {
   15667 	struct mii_data *mii = &sc->sc_mii;
   15668 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15669 	int i, reg, rv;
   15670 	uint16_t phyreg;
   15671 
   15672 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15673 		device_xname(sc->sc_dev), __func__));
   15674 
   15675 	/* If the link is not up, do nothing */
   15676 	if ((status & STATUS_LU) == 0)
   15677 		return 0;
   15678 
   15679 	/* Nothing to do if the link is other than 1Gbps */
   15680 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15681 		return 0;
   15682 
   15683 	for (i = 0; i < 10; i++) {
   15684 		/* read twice */
   15685 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15686 		if (rv != 0)
   15687 			return rv;
   15688 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15689 		if (rv != 0)
   15690 			return rv;
   15691 
   15692 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15693 			goto out;	/* GOOD! */
   15694 
   15695 		/* Reset the PHY */
   15696 		wm_reset_phy(sc);
   15697 		delay(5*1000);
   15698 	}
   15699 
   15700 	/* Disable GigE link negotiation */
   15701 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15702 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15703 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15704 
   15705 	/*
   15706 	 * Call gig speed drop workaround on Gig disable before accessing
   15707 	 * any PHY registers.
   15708 	 */
   15709 	wm_gig_downshift_workaround_ich8lan(sc);
   15710 
   15711 out:
   15712 	return 0;
   15713 }
   15714 
   15715 /*
   15716  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15717  *  @sc: pointer to the HW structure
   15718  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15720  *  LPLU, Gig disable, MDIC PHY reset):
   15721  *    1) Set Kumeran Near-end loopback
   15722  *    2) Clear Kumeran Near-end loopback
   15723  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15724  */
   15725 static void
   15726 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15727 {
   15728 	uint16_t kmreg;
   15729 
   15730 	/* Only for igp3 */
   15731 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15732 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15733 			return;
   15734 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15735 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15736 			return;
   15737 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15738 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15739 	}
   15740 }
   15741 
   15742 /*
   15743  * Workaround for pch's PHYs
   15744  * XXX should be moved to new PHY driver?
   15745  */
   15746 static int
   15747 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15748 {
   15749 	device_t dev = sc->sc_dev;
   15750 	struct mii_data *mii = &sc->sc_mii;
   15751 	struct mii_softc *child;
   15752 	uint16_t phy_data, phyrev = 0;
   15753 	int phytype = sc->sc_phytype;
   15754 	int rv;
   15755 
   15756 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15757 		device_xname(dev), __func__));
   15758 	KASSERT(sc->sc_type == WM_T_PCH);
   15759 
   15760 	/* Set MDIO slow mode before any other MDIO access */
   15761 	if (phytype == WMPHY_82577)
   15762 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15763 			return rv;
   15764 
   15765 	child = LIST_FIRST(&mii->mii_phys);
   15766 	if (child != NULL)
   15767 		phyrev = child->mii_mpd_rev;
   15768 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15770 	if ((child != NULL) &&
   15771 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15772 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15773 		/* Disable generation of early preamble (0x4431) */
   15774 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15775 		    &phy_data);
   15776 		if (rv != 0)
   15777 			return rv;
   15778 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15779 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15780 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15781 		    phy_data);
   15782 		if (rv != 0)
   15783 			return rv;
   15784 
   15785 		/* Preamble tuning for SSC */
   15786 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15787 		if (rv != 0)
   15788 			return rv;
   15789 	}
   15790 
   15791 	/* 82578 */
   15792 	if (phytype == WMPHY_82578) {
   15793 		/*
   15794 		 * Return registers to default by doing a soft reset then
   15795 		 * writing 0x3140 to the control register
   15796 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15797 		 */
   15798 		if ((child != NULL) && (phyrev < 2)) {
   15799 			PHY_RESET(child);
   15800 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15801 			if (rv != 0)
   15802 				return rv;
   15803 		}
   15804 	}
   15805 
   15806 	/* Select page 0 */
   15807 	if ((rv = sc->phy.acquire(sc)) != 0)
   15808 		return rv;
   15809 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15810 	sc->phy.release(sc);
   15811 	if (rv != 0)
   15812 		return rv;
   15813 
   15814 	/*
   15815 	 * Configure the K1 Si workaround during phy reset assuming there is
   15816 	 * link so that it disables K1 if link is in 1Gbps.
   15817 	 */
   15818 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15819 		return rv;
   15820 
   15821 	/* Workaround for link disconnects on a busy hub in half duplex */
   15822 	rv = sc->phy.acquire(sc);
   15823 	if (rv)
   15824 		return rv;
   15825 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15826 	if (rv)
   15827 		goto release;
   15828 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15829 	    phy_data & 0x00ff);
   15830 	if (rv)
   15831 		goto release;
   15832 
   15833 	/* Set MSE higher to enable link to stay up when noise is high */
   15834 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15835 release:
   15836 	sc->phy.release(sc);
   15837 
   15838 	return rv;
   15839 }
   15840 
   15841 /*
   15842  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15843  *  @sc:   pointer to the HW structure
   15844  */
   15845 static void
   15846 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15847 {
   15848 	device_t dev = sc->sc_dev;
   15849 	uint32_t mac_reg;
   15850 	uint16_t i, wuce;
   15851 	int count;
   15852 
   15853 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15854 		device_xname(sc->sc_dev), __func__));
   15855 
   15856 	if (sc->phy.acquire(sc) != 0)
   15857 		return;
   15858 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15859 		goto release;
   15860 
   15861 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15862 	count = wm_rar_count(sc);
   15863 	for (i = 0; i < count; i++) {
   15864 		uint16_t lo, hi;
   15865 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15866 		lo = (uint16_t)(mac_reg & 0xffff);
   15867 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15868 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15869 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15870 
   15871 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15872 		lo = (uint16_t)(mac_reg & 0xffff);
   15873 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15874 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15875 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15876 	}
   15877 
   15878 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15879 
   15880 release:
   15881 	sc->phy.release(sc);
   15882 }
   15883 
   15884 /*
   15885  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15886  *  done after every PHY reset.
   15887  */
   15888 static int
   15889 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15890 {
   15891 	device_t dev = sc->sc_dev;
   15892 	int rv;
   15893 
   15894 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15895 		device_xname(dev), __func__));
   15896 	KASSERT(sc->sc_type == WM_T_PCH2);
   15897 
   15898 	/* Set MDIO slow mode before any other MDIO access */
   15899 	rv = wm_set_mdio_slow_mode_hv(sc);
   15900 	if (rv != 0)
   15901 		return rv;
   15902 
   15903 	rv = sc->phy.acquire(sc);
   15904 	if (rv != 0)
   15905 		return rv;
   15906 	/* Set MSE higher to enable link to stay up when noise is high */
   15907 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15908 	if (rv != 0)
   15909 		goto release;
   15910 	/* Drop link after 5 times MSE threshold was reached */
   15911 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15912 release:
   15913 	sc->phy.release(sc);
   15914 
   15915 	return rv;
   15916 }
   15917 
   15918 /**
   15919  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15920  *  @link: link up bool flag
   15921  *
   15922  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15923  *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
   15925  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15926  *  speeds in order to avoid Tx hangs.
   15927  **/
   15928 static int
   15929 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15930 {
   15931 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15932 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15933 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15934 	uint16_t phyreg;
   15935 
   15936 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
   15940 		if (rv != 0)
   15941 			goto release;
   15942 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15943 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15944 		if (rv != 0)
   15945 			goto release;
   15946 		delay(20);
   15947 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15948 
   15949 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15950 		    &phyreg);
   15951 release:
   15952 		sc->phy.release(sc);
   15953 		return rv;
   15954 	}
   15955 
   15956 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15957 
   15958 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15959 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15960 	    || !link
   15961 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15962 		goto update_fextnvm6;
   15963 
   15964 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15965 
   15966 	/* Clear link status transmit timeout */
   15967 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15968 	if (speed == STATUS_SPEED_100) {
   15969 		/* Set inband Tx timeout to 5x10us for 100Half */
   15970 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15971 
   15972 		/* Do not extend the K1 entry latency for 100Half */
   15973 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15974 	} else {
   15975 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15976 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15977 
   15978 		/* Extend the K1 entry latency for 10 Mbps */
   15979 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15980 	}
   15981 
   15982 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15983 
   15984 update_fextnvm6:
   15985 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15986 	return 0;
   15987 }
   15988 
   15989 /*
   15990  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15991  *  @sc:   pointer to the HW structure
   15992  *  @link: link up bool flag
   15993  *
   15994  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15996  *  If link is down, the function will restore the default K1 setting located
   15997  *  in the NVM.
   15998  */
   15999 static int
   16000 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16001 {
   16002 	int k1_enable = sc->sc_nvm_k1_enabled;
   16003 
   16004 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16005 		device_xname(sc->sc_dev), __func__));
   16006 
   16007 	if (sc->phy.acquire(sc) != 0)
   16008 		return -1;
   16009 
   16010 	if (link) {
   16011 		k1_enable = 0;
   16012 
   16013 		/* Link stall fix for link up */
   16014 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16015 		    0x0100);
   16016 	} else {
   16017 		/* Link stall fix for link down */
   16018 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16019 		    0x4100);
   16020 	}
   16021 
   16022 	wm_configure_k1_ich8lan(sc, k1_enable);
   16023 	sc->phy.release(sc);
   16024 
   16025 	return 0;
   16026 }
   16027 
   16028 /*
   16029  *  wm_k1_workaround_lv - K1 Si workaround
   16030  *  @sc:   pointer to the HW structure
   16031  *
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
 *  Disable K1 for 1000 and 100 speeds.
   16034  */
   16035 static int
   16036 wm_k1_workaround_lv(struct wm_softc *sc)
   16037 {
   16038 	uint32_t reg;
   16039 	uint16_t phyreg;
   16040 	int rv;
   16041 
   16042 	if (sc->sc_type != WM_T_PCH2)
   16043 		return 0;
   16044 
   16045 	/* Set K1 beacon duration based on 10Mbps speed */
   16046 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16047 	if (rv != 0)
   16048 		return rv;
   16049 
   16050 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16051 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16052 		if (phyreg &
   16053 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   16055 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16056 			    &phyreg);
   16057 			if (rv != 0)
   16058 				return rv;
   16059 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16060 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16061 			    phyreg);
   16062 			if (rv != 0)
   16063 				return rv;
   16064 		} else {
   16065 			/* For 10Mbps */
   16066 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16067 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16068 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16069 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16070 		}
   16071 	}
   16072 
   16073 	return 0;
   16074 }
   16075 
   16076 /*
   16077  *  wm_link_stall_workaround_hv - Si workaround
   16078  *  @sc: pointer to the HW structure
   16079  *
   16080  *  This function works around a Si bug where the link partner can get
   16081  *  a link up indication before the PHY does. If small packets are sent
   16082  *  by the link partner they can be placed in the packet buffer without
 *  being properly accounted for by the PHY and will stall, preventing
   16084  *  further packets from being received.  The workaround is to clear the
   16085  *  packet buffer after the PHY detects link up.
   16086  */
   16087 static int
   16088 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16089 {
   16090 	uint16_t phyreg;
   16091 
   16092 	if (sc->sc_phytype != WMPHY_82578)
   16093 		return 0;
   16094 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16096 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16097 	if ((phyreg & BMCR_LOOP) != 0)
   16098 		return 0;
   16099 
   16100 	/* Check if link is up and at 1Gbps */
   16101 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16102 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16103 	    | BM_CS_STATUS_SPEED_MASK;
   16104 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16105 		| BM_CS_STATUS_SPEED_1000))
   16106 		return 0;
   16107 
   16108 	delay(200 * 1000);	/* XXX too big */
   16109 
   16110 	/* Flush the packets in the fifo buffer */
   16111 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16112 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16113 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16114 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16115 
   16116 	return 0;
   16117 }
   16118 
   16119 static int
   16120 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16121 {
   16122 	int rv;
   16123 	uint16_t reg;
   16124 
   16125 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16126 	if (rv != 0)
   16127 		return rv;
   16128 
   16129 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16130 	    reg | HV_KMRN_MDIO_SLOW);
   16131 }
   16132 
   16133 /*
   16134  *  wm_configure_k1_ich8lan - Configure K1 power state
   16135  *  @sc: pointer to the HW structure
   16136  *  @enable: K1 state to configure
   16137  *
   16138  *  Configure the K1 power state based on the provided parameter.
   16139  *  Assumes semaphore already acquired.
   16140  */
   16141 static void
   16142 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16143 {
   16144 	uint32_t ctrl, ctrl_ext, tmp;
   16145 	uint16_t kmreg;
   16146 	int rv;
   16147 
   16148 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16149 
   16150 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16151 	if (rv != 0)
   16152 		return;
   16153 
   16154 	if (k1_enable)
   16155 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16156 	else
   16157 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16158 
   16159 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16160 	if (rv != 0)
   16161 		return;
   16162 
   16163 	delay(20);
   16164 
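	/*
	 * Briefly force the MAC speed configuration (CTRL_FRCSPD with the
	 * speed bits cleared, plus CTRL_EXT_SPD_BYPS), then restore the
	 * original CTRL/CTRL_EXT values; this appears to be what lets the
	 * K1 change take effect.
	 */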
   16165 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16166 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16167 
   16168 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16169 	tmp |= CTRL_FRCSPD;
   16170 
   16171 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16172 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16173 	CSR_WRITE_FLUSH(sc);
   16174 	delay(20);
   16175 
   16176 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16177 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16178 	CSR_WRITE_FLUSH(sc);
   16179 	delay(20);
   16180 
   16181 	return;
   16182 }
   16183 
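/*
 * Illustrative sketch (not part of the driver): the KASSERT above demands
 * that the caller already owns the software MDIO semaphore, so a typical
 * call site would bracket the call with the acquire/release hooks used
 * elsewhere in this file:
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		wm_configure_k1_ich8lan(sc, k1_enable);
 *		sc->phy.release(sc);
 *	}
 */
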
/* Special case - the 82575 needs to do a manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

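/*
 * Illustrative sketch (not part of the driver): each call above pokes one
 * 8-bit value into a sub-register reached through an address/data window
 * in the named controller register.  Assuming a helper of that shape (the
 * shift macros shown are hypothetical, not the driver's actual names),
 * the mechanism is roughly:
 *
 *	v = (off << ADDR_SHIFT) | (data << DATA_SHIFT);
 *	CSR_WRITE(sc, reg, v);
 *	... poll until the controller signals that the write completed ...
 */
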
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

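/*
 * Illustrative note (not part of the driver): all-zeros and all-ones are
 * what an MDIO read typically returns when the bus is stuck low or
 * floating high (no PHY, or a powered-down PHY), so both ID values are
 * treated as "no device here":
 *
 *	MII_INVALIDID(0x0000)	-> true  (bus stuck low)
 *	MII_INVALIDID(0xffff)	-> true  (bus floating high)
 *	MII_INVALIDID(0x0141)	-> false (a plausible real PHYIDR1 value)
 */
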
static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in MDIO slow mode,
	 * set slow mode and try to get the PHY ID again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
		sc->phy.acquire(sc);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

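/*
 * Illustrative sketch (not part of the driver): this accessibility check
 * is most useful during PHY workaround setup, where an unreachable PHY is
 * usually recovered by toggling LANPHYPC (the helper that follows).  A
 * hypothetical caller, holding the semaphore as the KASSERT requires:
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		if (!wm_phy_is_accessible_pchlan(sc))
 *			wm_toggle_lanphypc_pch_lpt(sc);
 *		sc->phy.release(sc);
 *	}
 */
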
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT) {
		delay(50 * 1000);
	} else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

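/*
 * Timing note (not in the original source): on PCH_LPT and newer parts the
 * loop above polls CTRL_EXT_LPCD every 5ms for up to 20 iterations and
 * then settles for a further 30ms, so the worst case is roughly
 * 5ms * 20 + 30ms = 130ms; older parts simply wait a flat 50ms.
 */
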
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Use the same value for the snoop and no-snoop latencies */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

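/*
 * Worked example (not in the original source; the input values are
 * assumptions chosen for illustration).  With rxa = 24 KB, a 1500-byte
 * MTU (ETHER_HDR_LEN = 14) and a 1000 Mb/s link, the device-side latency
 * budget above computes as:
 *
 *	lat_ns = (24 * 1024 - 2 * (1500 + 14)) * 8 * 1000 / 1000
 *	       = 172384 ns
 *
 * 172384 does not fit in the 10-bit LTRV value field, so the loop divides
 * by 2^5 per step: 172384 -> 5387 (scale 1) -> 169 (scale 2).  The
 * encoding is therefore scale 2, value 169, which represents
 * 169 * 2^10 ns ~= 173 us, the smallest representable value that still
 * covers the computed budget.
 */
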
/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	int pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get the Power Management capability offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save the WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else {
			rv = -1;
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore the WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore the MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

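/*
 * Illustrative note (not in the original source): each retry of the loop
 * above rewrites the iNVM autoload word with INVM_PLL_WO_VAL set and then
 * bounces the device through D3hot and back to D0 via the PMCSR, which is
 * what forces the PHY/PLL to reinitialize from the patched word.  A
 * hypothetical attach-time caller might look like:
 *
 *	if ((sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
 *		if (wm_pll_workaround_i210(sc) != 0)
 *			aprint_error_dev(sc->sc_dev,
 *			    "I210 PLL workaround failed\n");
 */
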
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}

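/*
 * Illustrative sketch (not part of the driver): as the name and the
 * KASSERT suggest, this quirk only matters when an SPT/CNP device is
 * serviced by a legacy (INTx) interrupt rather than MSI/MSI-X.  A
 * hypothetical caller in the init path would gate it on the driver's
 * interrupt-type bookkeeping; the predicate name below is an assumption,
 * not this file's actual API:
 *
 *	if (((sc->sc_type == WM_T_PCH_SPT)
 *	    || (sc->sc_type == WM_T_PCH_CNP))
 *	    && using_intx_interrupt(sc))
 *		wm_legacy_irq_quirk_spt(sc);
 */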