Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.673
      1 /*	$NetBSD: if_wm.c,v 1.673 2020/04/08 21:57:24 jdolecek Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficiency Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.673 2020/04/08 21:57:24 jdolecek Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 #include <sys/sysctl.h>
    109 #include <sys/workqueue.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <net/rss_config.h>
    121 
    122 #include <netinet/in.h>			/* XXX for struct ip */
    123 #include <netinet/in_systm.h>		/* XXX for struct ip */
    124 #include <netinet/ip.h>			/* XXX for struct ip */
    125 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    126 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    127 
    128 #include <sys/bus.h>
    129 #include <sys/intr.h>
    130 #include <machine/endian.h>
    131 
    132 #include <dev/mii/mii.h>
    133 #include <dev/mii/mdio.h>
    134 #include <dev/mii/miivar.h>
    135 #include <dev/mii/miidevs.h>
    136 #include <dev/mii/mii_bitbang.h>
    137 #include <dev/mii/ikphyreg.h>
    138 #include <dev/mii/igphyreg.h>
    139 #include <dev/mii/igphyvar.h>
    140 #include <dev/mii/inbmphyreg.h>
    141 #include <dev/mii/ihphyreg.h>
    142 
    143 #include <dev/pci/pcireg.h>
    144 #include <dev/pci/pcivar.h>
    145 #include <dev/pci/pcidevs.h>
    146 
    147 #include <dev/pci/if_wmreg.h>
    148 #include <dev/pci/if_wmvar.h>
    149 
    150 #ifdef WM_DEBUG
    151 #define	WM_DEBUG_LINK		__BIT(0)
    152 #define	WM_DEBUG_TX		__BIT(1)
    153 #define	WM_DEBUG_RX		__BIT(2)
    154 #define	WM_DEBUG_GMII		__BIT(3)
    155 #define	WM_DEBUG_MANAGE		__BIT(4)
    156 #define	WM_DEBUG_NVM		__BIT(5)
    157 #define	WM_DEBUG_INIT		__BIT(6)
    158 #define	WM_DEBUG_LOCK		__BIT(7)
    159 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    160     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    161 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    162 #else
    163 #define	DPRINTF(x, y)	__nothing
    164 #endif /* WM_DEBUG */
    165 
    166 #ifdef NET_MPSAFE
    167 #define WM_MPSAFE	1
    168 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    169 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    170 #else
    171 #define CALLOUT_FLAGS	0
    172 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
    173 #endif
    174 
    175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    176 
    177 /*
    178  * This device driver's max interrupt numbers.
    179  */
    180 #define WM_MAX_NQUEUEINTR	16
    181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    182 
    183 #ifndef WM_DISABLE_MSI
    184 #define	WM_DISABLE_MSI 0
    185 #endif
    186 #ifndef WM_DISABLE_MSIX
    187 #define	WM_DISABLE_MSIX 0
    188 #endif
    189 
    190 int wm_disable_msi = WM_DISABLE_MSI;
    191 int wm_disable_msix = WM_DISABLE_MSIX;
    192 
    193 #ifndef WM_WATCHDOG_TIMEOUT
    194 #define WM_WATCHDOG_TIMEOUT 5
    195 #endif
    196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    197 
    198 /*
    199  * Transmit descriptor list size.  Due to errata, we can only have
    200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    201  * on >= 82544. We tell the upper layers that they can queue a lot
    202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    203  * of them at a time.
    204  *
    205  * We allow up to 64 DMA segments per packet.  Pathological packet
    206  * chains containing many small mbufs have been observed in zero-copy
    207  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
    208  * m_defrag() is called to reduce it.
    209  */
    210 #define	WM_NTXSEGS		64
    211 #define	WM_IFQUEUELEN		256
    212 #define	WM_TXQUEUELEN_MAX	64
    213 #define	WM_TXQUEUELEN_MAX_82547	16
    214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    217 #define	WM_NTXDESC_82542	256
    218 #define	WM_NTXDESC_82544	4096
    219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    224 
    225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    226 
    227 #define	WM_TXINTERQSIZE		256
    228 
    229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    231 #endif
    232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    234 #endif
    235 
    236 /*
    237  * Receive descriptor list size.  We have one Rx buffer for normal
    238  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    239  * packet.  We allocate 256 receive descriptors, each with a 2k
    240  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    241  */
    242 #define	WM_NRXDESC		256U
    243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    246 
    247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    249 #endif
    250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    252 #endif
    253 
    254 typedef union txdescs {
    255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    257 } txdescs_t;
    258 
    259 typedef union rxdescs {
    260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    261 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    263 } rxdescs_t;
    264 
    265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    267 
    268 /*
    269  * Software state for transmit jobs.
    270  */
    271 struct wm_txsoft {
    272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    274 	int txs_firstdesc;		/* first descriptor in packet */
    275 	int txs_lastdesc;		/* last descriptor in packet */
    276 	int txs_ndesc;			/* # of descriptors used */
    277 };
    278 
    279 /*
    280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    282  * them together.
    283  */
    284 struct wm_rxsoft {
    285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    287 };
    288 
    289 #define WM_LINKUP_TIMEOUT	50
    290 
    291 static uint16_t swfwphysem[] = {
    292 	SWFW_PHY0_SM,
    293 	SWFW_PHY1_SM,
    294 	SWFW_PHY2_SM,
    295 	SWFW_PHY3_SM
    296 };
    297 
    298 static const uint32_t wm_82580_rxpbs_table[] = {
    299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    300 };
    301 
    302 struct wm_softc;
    303 
    304 #ifdef WM_EVENT_COUNTERS
    305 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    306 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    307 	struct evcnt qname##_ev_##evname;
    308 
    309 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    310 	do {								\
    311 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    312 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    313 		    "%s%02d%s", #qname, (qnum), #evname);		\
    314 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    315 		    (evtype), NULL, (xname),				\
    316 		    (q)->qname##_##evname##_evcnt_name);		\
    317 	} while (0)
    318 
    319 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    320 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    321 
    322 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    323 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    324 
    325 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    326 	evcnt_detach(&(q)->qname##_ev_##evname);
    327 #endif /* WM_EVENT_COUNTERS */
    328 
    329 struct wm_txqueue {
    330 	kmutex_t *txq_lock;		/* lock for tx operations */
    331 
    332 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    333 
    334 	/* Software state for the transmit descriptors. */
    335 	int txq_num;			/* must be a power of two */
    336 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    337 
    338 	/* TX control data structures. */
    339 	int txq_ndesc;			/* must be a power of two */
    340 	size_t txq_descsize;		/* a tx descriptor size */
    341 	txdescs_t *txq_descs_u;
    342 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    343 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    344 	int txq_desc_rseg;		/* real number of control segment */
    345 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    346 #define	txq_descs	txq_descs_u->sctxu_txdescs
    347 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    348 
    349 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    350 
    351 	int txq_free;			/* number of free Tx descriptors */
    352 	int txq_next;			/* next ready Tx descriptor */
    353 
    354 	int txq_sfree;			/* number of free Tx jobs */
    355 	int txq_snext;			/* next free Tx job */
    356 	int txq_sdirty;			/* dirty Tx jobs */
    357 
    358 	/* These 4 variables are used only on the 82547. */
    359 	int txq_fifo_size;		/* Tx FIFO size */
    360 	int txq_fifo_head;		/* current head of FIFO */
    361 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    362 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    363 
    364 	/*
    365 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    366 	 * CPUs. This queue intermediate them without block.
    367 	 */
    368 	pcq_t *txq_interq;
    369 
    370 	/*
    371 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
    372 	 * to manage Tx H/W queue's busy flag.
    373 	 */
    374 	int txq_flags;			/* flags for H/W queue, see below */
    375 #define	WM_TXQ_NO_SPACE	0x1
    376 
    377 	bool txq_stopping;
    378 
    379 	bool txq_sending;
    380 	time_t txq_lastsent;
    381 
    382 	/* Checksum flags used for previous packet */
    383 	uint32_t 	txq_last_hw_cmd;
    384 	uint8_t 	txq_last_hw_fields;
    385 	uint16_t	txq_last_hw_ipcs;
    386 	uint16_t	txq_last_hw_tucs;
    387 
    388 	uint32_t txq_packets;		/* for AIM */
    389 	uint32_t txq_bytes;		/* for AIM */
    390 #ifdef WM_EVENT_COUNTERS
    391 	/* TX event counters */
    392 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    393 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    394 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    395 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    396 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    397 					    /* XXX not used? */
    398 
    399 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    400 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    401 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    402 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    403 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    404 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    405 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    406 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    407 					    /* other than toomanyseg */
    408 
    409 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
    410 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    411 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    412 	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip wring cksum context */
    413 
    414 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    415 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    416 #endif /* WM_EVENT_COUNTERS */
    417 };
    418 
    419 struct wm_rxqueue {
    420 	kmutex_t *rxq_lock;		/* lock for rx operations */
    421 
    422 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    423 
    424 	/* Software state for the receive descriptors. */
    425 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    426 
    427 	/* RX control data structures. */
    428 	int rxq_ndesc;			/* must be a power of two */
    429 	size_t rxq_descsize;		/* a rx descriptor size */
    430 	rxdescs_t *rxq_descs_u;
    431 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    432 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    433 	int rxq_desc_rseg;		/* real number of control segment */
    434 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    435 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    436 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    437 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    438 
    439 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    440 
    441 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    442 	int rxq_discard;
    443 	int rxq_len;
    444 	struct mbuf *rxq_head;
    445 	struct mbuf *rxq_tail;
    446 	struct mbuf **rxq_tailp;
    447 
    448 	bool rxq_stopping;
    449 
    450 	uint32_t rxq_packets;		/* for AIM */
    451 	uint32_t rxq_bytes;		/* for AIM */
    452 #ifdef WM_EVENT_COUNTERS
    453 	/* RX event counters */
    454 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    455 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    456 
    457 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    458 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    459 #endif
    460 };
    461 
    462 struct wm_queue {
    463 	int wmq_id;			/* index of TX/RX queues */
    464 	int wmq_intr_idx;		/* index of MSI-X tables */
    465 
    466 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    467 	bool wmq_set_itr;
    468 
    469 	struct wm_txqueue wmq_txq;
    470 	struct wm_rxqueue wmq_rxq;
    471 
    472 	bool wmq_txrx_use_workqueue;
    473 	struct work wmq_cookie;
    474 	void *wmq_si;
    475 	krndsource_t rnd_source;	/* random source */
    476 };
    477 
    478 struct wm_phyop {
    479 	int (*acquire)(struct wm_softc *);
    480 	void (*release)(struct wm_softc *);
    481 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    482 	int (*writereg_locked)(device_t, int, int, uint16_t);
    483 	int reset_delay_us;
    484 	bool no_errprint;
    485 };
    486 
    487 struct wm_nvmop {
    488 	int (*acquire)(struct wm_softc *);
    489 	void (*release)(struct wm_softc *);
    490 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    491 };
    492 
    493 /*
    494  * Software state per device.
    495  */
    496 struct wm_softc {
    497 	device_t sc_dev;		/* generic device information */
    498 	bus_space_tag_t sc_st;		/* bus space tag */
    499 	bus_space_handle_t sc_sh;	/* bus space handle */
    500 	bus_size_t sc_ss;		/* bus space size */
    501 	bus_space_tag_t sc_iot;		/* I/O space tag */
    502 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    503 	bus_size_t sc_ios;		/* I/O space size */
    504 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    505 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    506 	bus_size_t sc_flashs;		/* flash registers space size */
    507 	off_t sc_flashreg_offset;	/*
    508 					 * offset to flash registers from
    509 					 * start of BAR
    510 					 */
    511 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    512 
    513 	struct ethercom sc_ethercom;	/* ethernet common data */
    514 	struct mii_data sc_mii;		/* MII/media information */
    515 
    516 	pci_chipset_tag_t sc_pc;
    517 	pcitag_t sc_pcitag;
    518 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    519 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    520 
    521 	uint16_t sc_pcidevid;		/* PCI device ID */
    522 	wm_chip_type sc_type;		/* MAC type */
    523 	int sc_rev;			/* MAC revision */
    524 	wm_phy_type sc_phytype;		/* PHY type */
    525 	uint8_t sc_sfptype;		/* SFP type */
    526 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    527 #define	WM_MEDIATYPE_UNKNOWN		0x00
    528 #define	WM_MEDIATYPE_FIBER		0x01
    529 #define	WM_MEDIATYPE_COPPER		0x02
    530 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    531 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    532 	int sc_flags;			/* flags; see below */
    533 	u_short sc_if_flags;		/* last if_flags */
    534 	int sc_ec_capenable;		/* last ec_capenable */
    535 	int sc_flowflags;		/* 802.3x flow control flags */
    536 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    537 	int sc_align_tweak;
    538 
    539 	void *sc_ihs[WM_MAX_NINTR];	/*
    540 					 * interrupt cookie.
    541 					 * - legacy and msi use sc_ihs[0] only
    542 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    543 					 */
    544 	pci_intr_handle_t *sc_intrs;	/*
    545 					 * legacy and msi use sc_intrs[0] only
    546 					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
    547 					 */
    548 	int sc_nintrs;			/* number of interrupts */
    549 
    550 	int sc_link_intr_idx;		/* index of MSI-X tables */
    551 
    552 	callout_t sc_tick_ch;		/* tick callout */
    553 	bool sc_core_stopping;
    554 
    555 	int sc_nvm_ver_major;
    556 	int sc_nvm_ver_minor;
    557 	int sc_nvm_ver_build;
    558 	int sc_nvm_addrbits;		/* NVM address bits */
    559 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    560 	int sc_ich8_flash_base;
    561 	int sc_ich8_flash_bank_size;
    562 	int sc_nvm_k1_enabled;
    563 
    564 	int sc_nqueues;
    565 	struct wm_queue *sc_queue;
    566 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    567 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    568 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    569 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    570 	struct workqueue *sc_queue_wq;
    571 	bool sc_txrx_use_workqueue;
    572 
    573 	int sc_affinity_offset;
    574 
    575 #ifdef WM_EVENT_COUNTERS
    576 	/* Event counters. */
    577 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    578 
    579 	/* WM_T_82542_2_1 only */
    580 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    581 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    582 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    583 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    584 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    585 #endif /* WM_EVENT_COUNTERS */
    586 
    587 	struct sysctllog *sc_sysctllog;
    588 
    589 	/* This variable are used only on the 82547. */
    590 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    591 
    592 	uint32_t sc_ctrl;		/* prototype CTRL register */
    593 #if 0
    594 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    595 #endif
    596 	uint32_t sc_icr;		/* prototype interrupt bits */
    597 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    598 	uint32_t sc_tctl;		/* prototype TCTL register */
    599 	uint32_t sc_rctl;		/* prototype RCTL register */
    600 	uint32_t sc_txcw;		/* prototype TXCW register */
    601 	uint32_t sc_tipg;		/* prototype TIPG register */
    602 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    603 	uint32_t sc_pba;		/* prototype PBA register */
    604 
    605 	int sc_tbi_linkup;		/* TBI link status */
    606 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    607 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    608 
    609 	int sc_mchash_type;		/* multicast filter offset */
    610 
    611 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    612 
    613 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    614 	kmutex_t *sc_ich_phymtx;	/*
    615 					 * 82574/82583/ICH/PCH specific PHY
    616 					 * mutex. For 82574/82583, the mutex
    617 					 * is used for both PHY and NVM.
    618 					 */
    619 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    620 
    621 	struct wm_phyop phy;
    622 	struct wm_nvmop nvm;
    623 };
    624 
    625 #define WM_CORE_LOCK(_sc)						\
    626 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    627 #define WM_CORE_UNLOCK(_sc)						\
    628 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    629 #define WM_CORE_LOCKED(_sc)						\
    630 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    631 
    632 #define	WM_RXCHAIN_RESET(rxq)						\
    633 do {									\
    634 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    635 	*(rxq)->rxq_tailp = NULL;					\
    636 	(rxq)->rxq_len = 0;						\
    637 } while (/*CONSTCOND*/0)
    638 
    639 #define	WM_RXCHAIN_LINK(rxq, m)						\
    640 do {									\
    641 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    642 	(rxq)->rxq_tailp = &(m)->m_next;				\
    643 } while (/*CONSTCOND*/0)
    644 
    645 #ifdef WM_EVENT_COUNTERS
    646 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    647 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    648 
    649 #define WM_Q_EVCNT_INCR(qname, evname)			\
    650 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    651 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    652 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    653 #else /* !WM_EVENT_COUNTERS */
    654 #define	WM_EVCNT_INCR(ev)	/* nothing */
    655 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    656 
    657 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    658 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    659 #endif /* !WM_EVENT_COUNTERS */
    660 
    661 #define	CSR_READ(sc, reg)						\
    662 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    663 #define	CSR_WRITE(sc, reg, val)						\
    664 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    665 #define	CSR_WRITE_FLUSH(sc)						\
    666 	(void)CSR_READ((sc), WMREG_STATUS)
    667 
    668 #define ICH8_FLASH_READ32(sc, reg)					\
    669 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    670 	    (reg) + sc->sc_flashreg_offset)
    671 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    672 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    673 	    (reg) + sc->sc_flashreg_offset, (data))
    674 
    675 #define ICH8_FLASH_READ16(sc, reg)					\
    676 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    677 	    (reg) + sc->sc_flashreg_offset)
    678 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    679 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    680 	    (reg) + sc->sc_flashreg_offset, (data))
    681 
    682 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    683 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    684 
    685 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    686 #define	WM_CDTXADDR_HI(txq, x)						\
    687 	(sizeof(bus_addr_t) == 8 ?					\
    688 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    689 
    690 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    691 #define	WM_CDRXADDR_HI(rxq, x)						\
    692 	(sizeof(bus_addr_t) == 8 ?					\
    693 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    694 
    695 /*
    696  * Register read/write functions.
    697  * Other than CSR_{READ|WRITE}().
    698  */
    699 #if 0
    700 static inline uint32_t wm_io_read(struct wm_softc *, int);
    701 #endif
    702 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    703 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    704     uint32_t, uint32_t);
    705 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    706 
    707 /*
    708  * Descriptor sync/init functions.
    709  */
    710 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    711 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    712 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    713 
    714 /*
    715  * Device driver interface functions and commonly used functions.
    716  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    717  */
    718 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    719 static int	wm_match(device_t, cfdata_t, void *);
    720 static void	wm_attach(device_t, device_t, void *);
    721 static int	wm_detach(device_t, int);
    722 static bool	wm_suspend(device_t, const pmf_qual_t *);
    723 static bool	wm_resume(device_t, const pmf_qual_t *);
    724 static void	wm_watchdog(struct ifnet *);
    725 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    726     uint16_t *);
    727 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    728     uint16_t *);
    729 static void	wm_tick(void *);
    730 static int	wm_ifflags_cb(struct ethercom *);
    731 static int	wm_ioctl(struct ifnet *, u_long, void *);
    732 /* MAC address related */
    733 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    734 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    735 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    736 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    737 static int	wm_rar_count(struct wm_softc *);
    738 static void	wm_set_filter(struct wm_softc *);
    739 /* Reset and init related */
    740 static void	wm_set_vlan(struct wm_softc *);
    741 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    742 static void	wm_get_auto_rd_done(struct wm_softc *);
    743 static void	wm_lan_init_done(struct wm_softc *);
    744 static void	wm_get_cfg_done(struct wm_softc *);
    745 static int	wm_phy_post_reset(struct wm_softc *);
    746 static int	wm_write_smbus_addr(struct wm_softc *);
    747 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    748 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    749 static void	wm_initialize_hardware_bits(struct wm_softc *);
    750 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    751 static int	wm_reset_phy(struct wm_softc *);
    752 static void	wm_flush_desc_rings(struct wm_softc *);
    753 static void	wm_reset(struct wm_softc *);
    754 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    755 static void	wm_rxdrain(struct wm_rxqueue *);
    756 static void	wm_init_rss(struct wm_softc *);
    757 static void	wm_adjust_qnum(struct wm_softc *, int);
    758 static inline bool	wm_is_using_msix(struct wm_softc *);
    759 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    760 static int	wm_softint_establish(struct wm_softc *, int, int);
    761 static int	wm_setup_legacy(struct wm_softc *);
    762 static int	wm_setup_msix(struct wm_softc *);
    763 static int	wm_init(struct ifnet *);
    764 static int	wm_init_locked(struct ifnet *);
    765 static void	wm_init_sysctls(struct wm_softc *);
    766 static void	wm_unset_stopping_flags(struct wm_softc *);
    767 static void	wm_set_stopping_flags(struct wm_softc *);
    768 static void	wm_stop(struct ifnet *, int);
    769 static void	wm_stop_locked(struct ifnet *, bool, bool);
    770 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    771 static void	wm_82547_txfifo_stall(void *);
    772 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    773 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    774 /* DMA related */
    775 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    776 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    777 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    778 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    779     struct wm_txqueue *);
    780 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    781 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    782 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    783     struct wm_rxqueue *);
    784 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    785 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    786 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    787 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    788 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    789 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    790 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    791     struct wm_txqueue *);
    792 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    793     struct wm_rxqueue *);
    794 static int	wm_alloc_txrx_queues(struct wm_softc *);
    795 static void	wm_free_txrx_queues(struct wm_softc *);
    796 static int	wm_init_txrx_queues(struct wm_softc *);
    797 /* Start */
    798 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    799     struct wm_txsoft *, uint32_t *, uint8_t *);
    800 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    801 static void	wm_start(struct ifnet *);
    802 static void	wm_start_locked(struct ifnet *);
    803 static int	wm_transmit(struct ifnet *, struct mbuf *);
    804 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    805 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    806 		    bool);
    807 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    808     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    809 static void	wm_nq_start(struct ifnet *);
    810 static void	wm_nq_start_locked(struct ifnet *);
    811 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    812 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    813 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    814 		    bool);
    815 static void	wm_deferred_start_locked(struct wm_txqueue *);
    816 static void	wm_handle_queue(void *);
    817 static void	wm_handle_queue_work(struct work *, void *);
    818 /* Interrupt */
    819 static bool	wm_txeof(struct wm_txqueue *, u_int);
    820 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    821 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    822 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    823 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    824 static void	wm_linkintr(struct wm_softc *, uint32_t);
    825 static int	wm_intr_legacy(void *);
    826 static inline void	wm_txrxintr_disable(struct wm_queue *);
    827 static inline void	wm_txrxintr_enable(struct wm_queue *);
    828 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    829 static int	wm_txrxintr_msix(void *);
    830 static int	wm_linkintr_msix(void *);
    831 
    832 /*
    833  * Media related.
    834  * GMII, SGMII, TBI, SERDES and SFP.
    835  */
    836 /* Common */
    837 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    838 /* GMII related */
    839 static void	wm_gmii_reset(struct wm_softc *);
    840 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    841 static int	wm_get_phy_id_82575(struct wm_softc *);
    842 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    843 static int	wm_gmii_mediachange(struct ifnet *);
    844 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    845 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    846 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    847 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    848 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    849 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    850 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    851 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    852 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    853 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    854 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    855 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    856 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    857 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    858 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    859 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    860 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    861 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    862 	bool);
    863 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    864 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    865 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    866 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    867 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    868 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    869 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    870 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    871 static void	wm_gmii_statchg(struct ifnet *);
    872 /*
    873  * kumeran related (80003, ICH* and PCH*).
    874  * These functions are not for accessing MII registers but for accessing
    875  * kumeran specific registers.
    876  */
    877 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    878 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    879 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    880 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    881 /* EMI register related */
    882 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    883 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    884 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    885 /* SGMII */
    886 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    887 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    888 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    889 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    890 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    891 /* TBI related */
    892 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    893 static void	wm_tbi_mediainit(struct wm_softc *);
    894 static int	wm_tbi_mediachange(struct ifnet *);
    895 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    896 static int	wm_check_for_link(struct wm_softc *);
    897 static void	wm_tbi_tick(struct wm_softc *);
    898 /* SERDES related */
    899 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    900 static int	wm_serdes_mediachange(struct ifnet *);
    901 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    902 static void	wm_serdes_tick(struct wm_softc *);
    903 /* SFP related */
    904 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    905 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    906 
    907 /*
    908  * NVM related.
    909  * Microwire, SPI (w/wo EERD) and Flash.
    910  */
    911 /* Misc functions */
    912 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    913 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    914 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    915 /* Microwire */
    916 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    917 /* SPI */
    918 static int	wm_nvm_ready_spi(struct wm_softc *);
    919 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    920 /* Using with EERD */
    921 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    922 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    923 /* Flash */
    924 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    925     unsigned int *);
    926 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    927 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    928 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    929     uint32_t *);
    930 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    931 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    932 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    933 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    934 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    935 /* iNVM */
    936 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    937 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    938 /* Lock, detecting NVM type, validate checksum and read */
    939 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    940 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    941 static int	wm_nvm_validate_checksum(struct wm_softc *);
    942 static void	wm_nvm_version_invm(struct wm_softc *);
    943 static void	wm_nvm_version(struct wm_softc *);
    944 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    945 
    946 /*
    947  * Hardware semaphores.
    948  * Very complexed...
    949  */
    950 static int	wm_get_null(struct wm_softc *);
    951 static void	wm_put_null(struct wm_softc *);
    952 static int	wm_get_eecd(struct wm_softc *);
    953 static void	wm_put_eecd(struct wm_softc *);
    954 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    955 static void	wm_put_swsm_semaphore(struct wm_softc *);
    956 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    957 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    958 static int	wm_get_nvm_80003(struct wm_softc *);
    959 static void	wm_put_nvm_80003(struct wm_softc *);
    960 static int	wm_get_nvm_82571(struct wm_softc *);
    961 static void	wm_put_nvm_82571(struct wm_softc *);
    962 static int	wm_get_phy_82575(struct wm_softc *);
    963 static void	wm_put_phy_82575(struct wm_softc *);
    964 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    965 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    966 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    967 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    968 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    969 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    970 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    971 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    972 
    973 /*
    974  * Management mode and power management related subroutines.
    975  * BMC, AMT, suspend/resume and EEE.
    976  */
    977 #if 0
    978 static int	wm_check_mng_mode(struct wm_softc *);
    979 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    980 static int	wm_check_mng_mode_82574(struct wm_softc *);
    981 static int	wm_check_mng_mode_generic(struct wm_softc *);
    982 #endif
    983 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    984 static bool	wm_phy_resetisblocked(struct wm_softc *);
    985 static void	wm_get_hw_control(struct wm_softc *);
    986 static void	wm_release_hw_control(struct wm_softc *);
    987 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    988 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    989 static void	wm_init_manageability(struct wm_softc *);
    990 static void	wm_release_manageability(struct wm_softc *);
    991 static void	wm_get_wakeup(struct wm_softc *);
    992 static int	wm_ulp_disable(struct wm_softc *);
    993 static int	wm_enable_phy_wakeup(struct wm_softc *);
    994 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    995 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
    996 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
    997 static void	wm_enable_wakeup(struct wm_softc *);
    998 static void	wm_disable_aspm(struct wm_softc *);
    999 /* LPLU (Low Power Link Up) */
   1000 static void	wm_lplu_d0_disable(struct wm_softc *);
   1001 /* EEE */
   1002 static int	wm_set_eee_i350(struct wm_softc *);
   1003 static int	wm_set_eee_pchlan(struct wm_softc *);
   1004 static int	wm_set_eee(struct wm_softc *);
   1005 
   1006 /*
   1007  * Workarounds (mainly PHY related).
   1008  * Basically, PHY's workarounds are in the PHY drivers.
   1009  */
   1010 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1011 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1012 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1013 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1014 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1015 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1016 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1017 static int	wm_k1_workaround_lv(struct wm_softc *);
   1018 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1019 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1020 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1021 static void	wm_reset_init_script_82575(struct wm_softc *);
   1022 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1023 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1024 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1025 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1026 static int	wm_pll_workaround_i210(struct wm_softc *);
   1027 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1028 
   1029 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1030     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1031 
   1032 /*
   1033  * Devices supported by this driver.
   1034  */
   1035 static const struct wm_product {
   1036 	pci_vendor_id_t		wmp_vendor;
   1037 	pci_product_id_t	wmp_product;
   1038 	const char		*wmp_name;
   1039 	wm_chip_type		wmp_type;
   1040 	uint32_t		wmp_flags;
   1041 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1042 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1043 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1044 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1045 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1046 } wm_products[] = {
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1048 	  "Intel i82542 1000BASE-X Ethernet",
   1049 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1052 	  "Intel i82543GC 1000BASE-X Ethernet",
   1053 	  WM_T_82543,		WMP_F_FIBER },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1056 	  "Intel i82543GC 1000BASE-T Ethernet",
   1057 	  WM_T_82543,		WMP_F_COPPER },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1060 	  "Intel i82544EI 1000BASE-T Ethernet",
   1061 	  WM_T_82544,		WMP_F_COPPER },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1064 	  "Intel i82544EI 1000BASE-X Ethernet",
   1065 	  WM_T_82544,		WMP_F_FIBER },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1068 	  "Intel i82544GC 1000BASE-T Ethernet",
   1069 	  WM_T_82544,		WMP_F_COPPER },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1072 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1073 	  WM_T_82544,		WMP_F_COPPER },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1076 	  "Intel i82540EM 1000BASE-T Ethernet",
   1077 	  WM_T_82540,		WMP_F_COPPER },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1080 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1081 	  WM_T_82540,		WMP_F_COPPER },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1084 	  "Intel i82540EP 1000BASE-T Ethernet",
   1085 	  WM_T_82540,		WMP_F_COPPER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1088 	  "Intel i82540EP 1000BASE-T Ethernet",
   1089 	  WM_T_82540,		WMP_F_COPPER },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1092 	  "Intel i82540EP 1000BASE-T Ethernet",
   1093 	  WM_T_82540,		WMP_F_COPPER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1096 	  "Intel i82545EM 1000BASE-T Ethernet",
   1097 	  WM_T_82545,		WMP_F_COPPER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1100 	  "Intel i82545GM 1000BASE-T Ethernet",
   1101 	  WM_T_82545_3,		WMP_F_COPPER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1104 	  "Intel i82545GM 1000BASE-X Ethernet",
   1105 	  WM_T_82545_3,		WMP_F_FIBER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1108 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1109 	  WM_T_82545_3,		WMP_F_SERDES },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1112 	  "Intel i82546EB 1000BASE-T Ethernet",
   1113 	  WM_T_82546,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1116 	  "Intel i82546EB 1000BASE-T Ethernet",
   1117 	  WM_T_82546,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1120 	  "Intel i82545EM 1000BASE-X Ethernet",
   1121 	  WM_T_82545,		WMP_F_FIBER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1124 	  "Intel i82546EB 1000BASE-X Ethernet",
   1125 	  WM_T_82546,		WMP_F_FIBER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1128 	  "Intel i82546GB 1000BASE-T Ethernet",
   1129 	  WM_T_82546_3,		WMP_F_COPPER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1132 	  "Intel i82546GB 1000BASE-X Ethernet",
   1133 	  WM_T_82546_3,		WMP_F_FIBER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1136 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1137 	  WM_T_82546_3,		WMP_F_SERDES },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1140 	  "i82546GB quad-port Gigabit Ethernet",
   1141 	  WM_T_82546_3,		WMP_F_COPPER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1144 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1145 	  WM_T_82546_3,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1148 	  "Intel PRO/1000MT (82546GB)",
   1149 	  WM_T_82546_3,		WMP_F_COPPER },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1152 	  "Intel i82541EI 1000BASE-T Ethernet",
   1153 	  WM_T_82541,		WMP_F_COPPER },
   1154 
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1156 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1157 	  WM_T_82541,		WMP_F_COPPER },
   1158 
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1160 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1161 	  WM_T_82541,		WMP_F_COPPER },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1164 	  "Intel i82541ER 1000BASE-T Ethernet",
   1165 	  WM_T_82541_2,		WMP_F_COPPER },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1168 	  "Intel i82541GI 1000BASE-T Ethernet",
   1169 	  WM_T_82541_2,		WMP_F_COPPER },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1172 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1173 	  WM_T_82541_2,		WMP_F_COPPER },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1176 	  "Intel i82541PI 1000BASE-T Ethernet",
   1177 	  WM_T_82541_2,		WMP_F_COPPER },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1180 	  "Intel i82547EI 1000BASE-T Ethernet",
   1181 	  WM_T_82547,		WMP_F_COPPER },
   1182 
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1184 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1185 	  WM_T_82547,		WMP_F_COPPER },
   1186 
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1188 	  "Intel i82547GI 1000BASE-T Ethernet",
   1189 	  WM_T_82547_2,		WMP_F_COPPER },
   1190 
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1192 	  "Intel PRO/1000 PT (82571EB)",
   1193 	  WM_T_82571,		WMP_F_COPPER },
   1194 
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1196 	  "Intel PRO/1000 PF (82571EB)",
   1197 	  WM_T_82571,		WMP_F_FIBER },
   1198 
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1200 	  "Intel PRO/1000 PB (82571EB)",
   1201 	  WM_T_82571,		WMP_F_SERDES },
   1202 
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1204 	  "Intel PRO/1000 QT (82571EB)",
   1205 	  WM_T_82571,		WMP_F_COPPER },
   1206 
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1208 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1209 	  WM_T_82571,		WMP_F_COPPER },
   1210 
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1212 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1213 	  WM_T_82571,		WMP_F_COPPER },
   1214 
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1216 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1217 	  WM_T_82571,		WMP_F_SERDES },
   1218 
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1220 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1221 	  WM_T_82571,		WMP_F_SERDES },
   1222 
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1224 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1225 	  WM_T_82571,		WMP_F_FIBER },
   1226 
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1228 	  "Intel i82572EI 1000baseT Ethernet",
   1229 	  WM_T_82572,		WMP_F_COPPER },
   1230 
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1232 	  "Intel i82572EI 1000baseX Ethernet",
   1233 	  WM_T_82572,		WMP_F_FIBER },
   1234 
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1236 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1237 	  WM_T_82572,		WMP_F_SERDES },
   1238 
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1240 	  "Intel i82572EI 1000baseT Ethernet",
   1241 	  WM_T_82572,		WMP_F_COPPER },
   1242 
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1244 	  "Intel i82573E",
   1245 	  WM_T_82573,		WMP_F_COPPER },
   1246 
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1248 	  "Intel i82573E IAMT",
   1249 	  WM_T_82573,		WMP_F_COPPER },
   1250 
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1252 	  "Intel i82573L Gigabit Ethernet",
   1253 	  WM_T_82573,		WMP_F_COPPER },
   1254 
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1256 	  "Intel i82574L",
   1257 	  WM_T_82574,		WMP_F_COPPER },
   1258 
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1260 	  "Intel i82574L",
   1261 	  WM_T_82574,		WMP_F_COPPER },
   1262 
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1264 	  "Intel i82583V",
   1265 	  WM_T_82583,		WMP_F_COPPER },
   1266 
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1268 	  "i80003 dual 1000baseT Ethernet",
   1269 	  WM_T_80003,		WMP_F_COPPER },
   1270 
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1272 	  "i80003 dual 1000baseX Ethernet",
   1273 	  WM_T_80003,		WMP_F_COPPER },
   1274 
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1276 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1277 	  WM_T_80003,		WMP_F_SERDES },
   1278 
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1280 	  "Intel i80003 1000baseT Ethernet",
   1281 	  WM_T_80003,		WMP_F_COPPER },
   1282 
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1284 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1285 	  WM_T_80003,		WMP_F_SERDES },
   1286 
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1288 	  "Intel i82801H (M_AMT) LAN Controller",
   1289 	  WM_T_ICH8,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1291 	  "Intel i82801H (AMT) LAN Controller",
   1292 	  WM_T_ICH8,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1294 	  "Intel i82801H LAN Controller",
   1295 	  WM_T_ICH8,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1297 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1298 	  WM_T_ICH8,		WMP_F_COPPER },
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1300 	  "Intel i82801H (M) LAN Controller",
   1301 	  WM_T_ICH8,		WMP_F_COPPER },
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1303 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1304 	  WM_T_ICH8,		WMP_F_COPPER },
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1306 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1307 	  WM_T_ICH8,		WMP_F_COPPER },
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1309 	  "82567V-3 LAN Controller",
   1310 	  WM_T_ICH8,		WMP_F_COPPER },
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1312 	  "82801I (AMT) LAN Controller",
   1313 	  WM_T_ICH9,		WMP_F_COPPER },
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1315 	  "82801I 10/100 LAN Controller",
   1316 	  WM_T_ICH9,		WMP_F_COPPER },
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1318 	  "82801I (G) 10/100 LAN Controller",
   1319 	  WM_T_ICH9,		WMP_F_COPPER },
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1321 	  "82801I (GT) 10/100 LAN Controller",
   1322 	  WM_T_ICH9,		WMP_F_COPPER },
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1324 	  "82801I (C) LAN Controller",
   1325 	  WM_T_ICH9,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1327 	  "82801I mobile LAN Controller",
   1328 	  WM_T_ICH9,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1330 	  "82801I mobile (V) LAN Controller",
   1331 	  WM_T_ICH9,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1333 	  "82801I mobile (AMT) LAN Controller",
   1334 	  WM_T_ICH9,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1336 	  "82567LM-4 LAN Controller",
   1337 	  WM_T_ICH9,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1339 	  "82567LM-2 LAN Controller",
   1340 	  WM_T_ICH10,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1342 	  "82567LF-2 LAN Controller",
   1343 	  WM_T_ICH10,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1345 	  "82567LM-3 LAN Controller",
   1346 	  WM_T_ICH10,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1348 	  "82567LF-3 LAN Controller",
   1349 	  WM_T_ICH10,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1351 	  "82567V-2 LAN Controller",
   1352 	  WM_T_ICH10,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1354 	  "82567V-3? LAN Controller",
   1355 	  WM_T_ICH10,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1357 	  "HANKSVILLE LAN Controller",
   1358 	  WM_T_ICH10,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1360 	  "PCH LAN (82577LM) Controller",
   1361 	  WM_T_PCH,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1363 	  "PCH LAN (82577LC) Controller",
   1364 	  WM_T_PCH,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1366 	  "PCH LAN (82578DM) Controller",
   1367 	  WM_T_PCH,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1369 	  "PCH LAN (82578DC) Controller",
   1370 	  WM_T_PCH,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1372 	  "PCH2 LAN (82579LM) Controller",
   1373 	  WM_T_PCH2,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1375 	  "PCH2 LAN (82579V) Controller",
   1376 	  WM_T_PCH2,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1378 	  "82575EB dual-1000baseT Ethernet",
   1379 	  WM_T_82575,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1381 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1382 	  WM_T_82575,		WMP_F_SERDES },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1384 	  "82575GB quad-1000baseT Ethernet",
   1385 	  WM_T_82575,		WMP_F_COPPER },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1387 	  "82575GB quad-1000baseT Ethernet (PM)",
   1388 	  WM_T_82575,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1390 	  "82576 1000BaseT Ethernet",
   1391 	  WM_T_82576,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1393 	  "82576 1000BaseX Ethernet",
   1394 	  WM_T_82576,		WMP_F_FIBER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1397 	  "82576 gigabit Ethernet (SERDES)",
   1398 	  WM_T_82576,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1401 	  "82576 quad-1000BaseT Ethernet",
   1402 	  WM_T_82576,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1405 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1406 	  WM_T_82576,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1409 	  "82576 gigabit Ethernet",
   1410 	  WM_T_82576,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1413 	  "82576 gigabit Ethernet (SERDES)",
   1414 	  WM_T_82576,		WMP_F_SERDES },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1416 	  "82576 quad-gigabit Ethernet (SERDES)",
   1417 	  WM_T_82576,		WMP_F_SERDES },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1420 	  "82580 1000BaseT Ethernet",
   1421 	  WM_T_82580,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1423 	  "82580 1000BaseX Ethernet",
   1424 	  WM_T_82580,		WMP_F_FIBER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1427 	  "82580 1000BaseT Ethernet (SERDES)",
   1428 	  WM_T_82580,		WMP_F_SERDES },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1431 	  "82580 gigabit Ethernet (SGMII)",
   1432 	  WM_T_82580,		WMP_F_COPPER },
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1434 	  "82580 dual-1000BaseT Ethernet",
   1435 	  WM_T_82580,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1438 	  "82580 quad-1000BaseX Ethernet",
   1439 	  WM_T_82580,		WMP_F_FIBER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1442 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1443 	  WM_T_82580,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1446 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1447 	  WM_T_82580,		WMP_F_SERDES },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1450 	  "DH89XXCC 1000BASE-KX Ethernet",
   1451 	  WM_T_82580,		WMP_F_SERDES },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1454 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1455 	  WM_T_82580,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1458 	  "I350 Gigabit Network Connection",
   1459 	  WM_T_I350,		WMP_F_COPPER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1462 	  "I350 Gigabit Fiber Network Connection",
   1463 	  WM_T_I350,		WMP_F_FIBER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1466 	  "I350 Gigabit Backplane Connection",
   1467 	  WM_T_I350,		WMP_F_SERDES },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1470 	  "I350 Quad Port Gigabit Ethernet",
   1471 	  WM_T_I350,		WMP_F_SERDES },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1474 	  "I350 Gigabit Connection",
   1475 	  WM_T_I350,		WMP_F_COPPER },
   1476 
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1478 	  "I354 Gigabit Ethernet (KX)",
   1479 	  WM_T_I354,		WMP_F_SERDES },
   1480 
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1482 	  "I354 Gigabit Ethernet (SGMII)",
   1483 	  WM_T_I354,		WMP_F_COPPER },
   1484 
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1486 	  "I354 Gigabit Ethernet (2.5G)",
   1487 	  WM_T_I354,		WMP_F_COPPER },
   1488 
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1490 	  "I210-T1 Ethernet Server Adapter",
   1491 	  WM_T_I210,		WMP_F_COPPER },
   1492 
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1494 	  "I210 Ethernet (Copper OEM)",
   1495 	  WM_T_I210,		WMP_F_COPPER },
   1496 
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1498 	  "I210 Ethernet (Copper IT)",
   1499 	  WM_T_I210,		WMP_F_COPPER },
   1500 
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1502 	  "I210 Ethernet (Copper, FLASH less)",
   1503 	  WM_T_I210,		WMP_F_COPPER },
   1504 
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1506 	  "I210 Gigabit Ethernet (Fiber)",
   1507 	  WM_T_I210,		WMP_F_FIBER },
   1508 
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1510 	  "I210 Gigabit Ethernet (SERDES)",
   1511 	  WM_T_I210,		WMP_F_SERDES },
   1512 
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1514 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1515 	  WM_T_I210,		WMP_F_SERDES },
   1516 
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1518 	  "I210 Gigabit Ethernet (SGMII)",
   1519 	  WM_T_I210,		WMP_F_COPPER },
   1520 
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1522 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1523 	  WM_T_I210,		WMP_F_COPPER },
   1524 
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1526 	  "I211 Ethernet (COPPER)",
   1527 	  WM_T_I211,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1529 	  "I217 V Ethernet Connection",
   1530 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1532 	  "I217 LM Ethernet Connection",
   1533 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1535 	  "I218 V Ethernet Connection",
   1536 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1538 	  "I218 V Ethernet Connection",
   1539 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1541 	  "I218 V Ethernet Connection",
   1542 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1544 	  "I218 LM Ethernet Connection",
   1545 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1547 	  "I218 LM Ethernet Connection",
   1548 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1550 	  "I218 LM Ethernet Connection",
   1551 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1553 	  "I219 LM Ethernet Connection",
   1554 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1556 	  "I219 LM Ethernet Connection",
   1557 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1559 	  "I219 LM Ethernet Connection",
   1560 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1565 	  "I219 LM Ethernet Connection",
   1566 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1568 	  "I219 LM Ethernet Connection",
   1569 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1571 	  "I219 LM Ethernet Connection",
   1572 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1574 	  "I219 LM Ethernet Connection",
   1575 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1577 	  "I219 LM Ethernet Connection",
   1578 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1580 	  "I219 LM Ethernet Connection",
   1581 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1583 	  "I219 LM Ethernet Connection",
   1584 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1585 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1586 	  "I219 LM Ethernet Connection",
   1587 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1589 	  "I219 LM Ethernet Connection",
   1590 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1592 	  "I219 LM Ethernet Connection",
   1593 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1595 	  "I219 LM Ethernet Connection",
   1596 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1597 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1598 	  "I219 V Ethernet Connection",
   1599 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1601 	  "I219 V Ethernet Connection",
   1602 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1604 	  "I219 V Ethernet Connection",
   1605 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1607 	  "I219 V Ethernet Connection",
   1608 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1610 	  "I219 V Ethernet Connection",
   1611 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1613 	  "I219 V Ethernet Connection",
   1614 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1616 	  "I219 V Ethernet Connection",
   1617 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1619 	  "I219 V Ethernet Connection",
   1620 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1622 	  "I219 V Ethernet Connection",
   1623 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1625 	  "I219 V Ethernet Connection",
   1626 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1628 	  "I219 V Ethernet Connection",
   1629 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1631 	  "I219 V Ethernet Connection",
   1632 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1634 	  "I219 V Ethernet Connection",
   1635 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1636 	{ 0,			0,
   1637 	  NULL,
   1638 	  0,			0 },
   1639 };
   1640 
   1641 /*
   1642  * Register read/write functions.
   1643  * Other than CSR_{READ|WRITE}().
   1644  */
   1645 
   1646 #if 0 /* Not currently used */
   1647 static inline uint32_t
   1648 wm_io_read(struct wm_softc *sc, int reg)
   1649 {
   1650 
   1651 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1652 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1653 }
   1654 #endif
   1655 
   1656 static inline void
   1657 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1658 {
   1659 
   1660 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1661 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1662 }
   1663 
   1664 static inline void
   1665 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1666     uint32_t data)
   1667 {
   1668 	uint32_t regval;
   1669 	int i;
   1670 
   1671 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1672 
   1673 	CSR_WRITE(sc, reg, regval);
   1674 
   1675 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1676 		delay(5);
   1677 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1678 			break;
   1679 	}
   1680 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1681 		aprint_error("%s: WARNING:"
   1682 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1683 		    device_xname(sc->sc_dev), reg);
   1684 	}
   1685 }
   1686 
   1687 static inline void
   1688 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1689 {
   1690 	wa->wa_low = htole32(v & 0xffffffffU);
   1691 	if (sizeof(bus_addr_t) == 8)
   1692 		wa->wa_high = htole32((uint64_t) v >> 32);
   1693 	else
   1694 		wa->wa_high = 0;
   1695 }
   1696 
   1697 /*
   1698  * Descriptor sync/init functions.
   1699  */
   1700 static inline void
   1701 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1702 {
   1703 	struct wm_softc *sc = txq->txq_sc;
   1704 
   1705 	/* If it will wrap around, sync to the end of the ring. */
   1706 	if ((start + num) > WM_NTXDESC(txq)) {
   1707 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1708 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1709 		    (WM_NTXDESC(txq) - start), ops);
   1710 		num -= (WM_NTXDESC(txq) - start);
   1711 		start = 0;
   1712 	}
   1713 
   1714 	/* Now sync whatever is left. */
   1715 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1716 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1717 }
   1718 
   1719 static inline void
   1720 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1721 {
   1722 	struct wm_softc *sc = rxq->rxq_sc;
   1723 
   1724 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1725 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1726 }
   1727 
   1728 static inline void
   1729 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1730 {
   1731 	struct wm_softc *sc = rxq->rxq_sc;
   1732 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1733 	struct mbuf *m = rxs->rxs_mbuf;
   1734 
   1735 	/*
   1736 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1737 	 * so that the payload after the Ethernet header is aligned
   1738 	 * to a 4-byte boundary.
   1739 
   1740 	 * XXX BRAINDAMAGE ALERT!
   1741 	 * The stupid chip uses the same size for every buffer, which
   1742 	 * is set in the Receive Control register.  We are using the 2K
   1743 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1744 	 * reason, we can't "scoot" packets longer than the standard
   1745 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1746 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1747 	 * the upper layer copy the headers.
   1748 	 */
   1749 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1750 
   1751 	if (sc->sc_type == WM_T_82574) {
   1752 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1753 		rxd->erx_data.erxd_addr =
   1754 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1755 		rxd->erx_data.erxd_dd = 0;
   1756 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1757 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1758 
   1759 		rxd->nqrx_data.nrxd_paddr =
   1760 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1761 		/* Currently, split header is not supported. */
   1762 		rxd->nqrx_data.nrxd_haddr = 0;
   1763 	} else {
   1764 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1765 
   1766 		wm_set_dma_addr(&rxd->wrx_addr,
   1767 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1768 		rxd->wrx_len = 0;
   1769 		rxd->wrx_cksum = 0;
   1770 		rxd->wrx_status = 0;
   1771 		rxd->wrx_errors = 0;
   1772 		rxd->wrx_special = 0;
   1773 	}
   1774 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1775 
   1776 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1777 }
   1778 
   1779 /*
   1780  * Device driver interface functions and commonly used functions.
   1781  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1782  */
   1783 
   1784 /* Lookup supported device table */
   1785 static const struct wm_product *
   1786 wm_lookup(const struct pci_attach_args *pa)
   1787 {
   1788 	const struct wm_product *wmp;
   1789 
   1790 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1791 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1792 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1793 			return wmp;
   1794 	}
   1795 	return NULL;
   1796 }
   1797 
   1798 /* The match function (ca_match) */
   1799 static int
   1800 wm_match(device_t parent, cfdata_t cf, void *aux)
   1801 {
   1802 	struct pci_attach_args *pa = aux;
   1803 
   1804 	if (wm_lookup(pa) != NULL)
   1805 		return 1;
   1806 
   1807 	return 0;
   1808 }
   1809 
   1810 /* The attach function (ca_attach) */
   1811 static void
   1812 wm_attach(device_t parent, device_t self, void *aux)
   1813 {
   1814 	struct wm_softc *sc = device_private(self);
   1815 	struct pci_attach_args *pa = aux;
   1816 	prop_dictionary_t dict;
   1817 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1818 	pci_chipset_tag_t pc = pa->pa_pc;
   1819 	int counts[PCI_INTR_TYPE_SIZE];
   1820 	pci_intr_type_t max_type;
   1821 	const char *eetype, *xname;
   1822 	bus_space_tag_t memt;
   1823 	bus_space_handle_t memh;
   1824 	bus_size_t memsize;
   1825 	int memh_valid;
   1826 	int i, error;
   1827 	const struct wm_product *wmp;
   1828 	prop_data_t ea;
   1829 	prop_number_t pn;
   1830 	uint8_t enaddr[ETHER_ADDR_LEN];
   1831 	char buf[256];
   1832 	char wqname[MAXCOMLEN];
   1833 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1834 	pcireg_t preg, memtype;
   1835 	uint16_t eeprom_data, apme_mask;
   1836 	bool force_clear_smbi;
   1837 	uint32_t link_mode;
   1838 	uint32_t reg;
   1839 
   1840 	sc->sc_dev = self;
   1841 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1842 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1843 	sc->sc_core_stopping = false;
   1844 
   1845 	wmp = wm_lookup(pa);
   1846 #ifdef DIAGNOSTIC
   1847 	if (wmp == NULL) {
   1848 		printf("\n");
   1849 		panic("wm_attach: impossible");
   1850 	}
   1851 #endif
   1852 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1853 
   1854 	sc->sc_pc = pa->pa_pc;
   1855 	sc->sc_pcitag = pa->pa_tag;
   1856 
   1857 	if (pci_dma64_available(pa))
   1858 		sc->sc_dmat = pa->pa_dmat64;
   1859 	else
   1860 		sc->sc_dmat = pa->pa_dmat;
   1861 
   1862 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1863 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1864 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1865 
   1866 	sc->sc_type = wmp->wmp_type;
   1867 
   1868 	/* Set default function pointers */
   1869 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1870 	sc->phy.release = sc->nvm.release = wm_put_null;
   1871 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1872 
   1873 	if (sc->sc_type < WM_T_82543) {
   1874 		if (sc->sc_rev < 2) {
   1875 			aprint_error_dev(sc->sc_dev,
   1876 			    "i82542 must be at least rev. 2\n");
   1877 			return;
   1878 		}
   1879 		if (sc->sc_rev < 3)
   1880 			sc->sc_type = WM_T_82542_2_0;
   1881 	}
   1882 
   1883 	/*
   1884 	 * Disable MSI for Errata:
   1885 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1886 	 *
   1887 	 *  82544: Errata 25
   1888 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1889 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1890 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1891 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1892 	 *
   1893 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1894 	 *
   1895 	 *  82571 & 82572: Errata 63
   1896 	 */
   1897 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1898 	    || (sc->sc_type == WM_T_82572))
   1899 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1900 
   1901 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1902 	    || (sc->sc_type == WM_T_82580)
   1903 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1904 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1905 		sc->sc_flags |= WM_F_NEWQUEUE;
   1906 
   1907 	/* Set device properties (mactype) */
   1908 	dict = device_properties(sc->sc_dev);
   1909 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1910 
   1911 	/*
   1912 	 * Map the device.  All devices support memory-mapped acccess,
   1913 	 * and it is really required for normal operation.
   1914 	 */
   1915 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1916 	switch (memtype) {
   1917 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1918 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1919 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1920 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1921 		break;
   1922 	default:
   1923 		memh_valid = 0;
   1924 		break;
   1925 	}
   1926 
   1927 	if (memh_valid) {
   1928 		sc->sc_st = memt;
   1929 		sc->sc_sh = memh;
   1930 		sc->sc_ss = memsize;
   1931 	} else {
   1932 		aprint_error_dev(sc->sc_dev,
   1933 		    "unable to map device registers\n");
   1934 		return;
   1935 	}
   1936 
   1937 	/*
   1938 	 * In addition, i82544 and later support I/O mapped indirect
   1939 	 * register access.  It is not desirable (nor supported in
   1940 	 * this driver) to use it for normal operation, though it is
   1941 	 * required to work around bugs in some chip versions.
   1942 	 */
   1943 	if (sc->sc_type >= WM_T_82544) {
   1944 		/* First we have to find the I/O BAR. */
   1945 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1946 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1947 			if (memtype == PCI_MAPREG_TYPE_IO)
   1948 				break;
   1949 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1950 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1951 				i += 4;	/* skip high bits, too */
   1952 		}
   1953 		if (i < PCI_MAPREG_END) {
   1954 			/*
   1955 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   1956 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
   1957 			 * It's no problem because newer chips has no this
   1958 			 * bug.
   1959 			 *
   1960 			 * The i8254x doesn't apparently respond when the
   1961 			 * I/O BAR is 0, which looks somewhat like it's not
   1962 			 * been configured.
   1963 			 */
   1964 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1965 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1966 				aprint_error_dev(sc->sc_dev,
   1967 				    "WARNING: I/O BAR at zero.\n");
   1968 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1969 					0, &sc->sc_iot, &sc->sc_ioh,
   1970 					NULL, &sc->sc_ios) == 0) {
   1971 				sc->sc_flags |= WM_F_IOH_VALID;
   1972 			} else
   1973 				aprint_error_dev(sc->sc_dev,
   1974 				    "WARNING: unable to map I/O space\n");
   1975 		}
   1976 
   1977 	}
   1978 
   1979 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1980 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1981 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1982 	if (sc->sc_type < WM_T_82542_2_1)
   1983 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1984 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1985 
   1986 	/* Power up chip */
   1987 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1988 	    && error != EOPNOTSUPP) {
   1989 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1990 		return;
   1991 	}
   1992 
   1993 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1994 	/*
   1995 	 *  Don't use MSI-X if we can use only one queue to save interrupt
   1996 	 * resource.
   1997 	 */
   1998 	if (sc->sc_nqueues > 1) {
   1999 		max_type = PCI_INTR_TYPE_MSIX;
   2000 		/*
   2001 		 *  82583 has a MSI-X capability in the PCI configuration space
   2002 		 * but it doesn't support it. At least the document doesn't
   2003 		 * say anything about MSI-X.
   2004 		 */
   2005 		counts[PCI_INTR_TYPE_MSIX]
   2006 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2007 	} else {
   2008 		max_type = PCI_INTR_TYPE_MSI;
   2009 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2010 	}
   2011 
   2012 	/* Allocation settings */
   2013 	counts[PCI_INTR_TYPE_MSI] = 1;
   2014 	counts[PCI_INTR_TYPE_INTX] = 1;
   2015 	/* overridden by disable flags */
   2016 	if (wm_disable_msi != 0) {
   2017 		counts[PCI_INTR_TYPE_MSI] = 0;
   2018 		if (wm_disable_msix != 0) {
   2019 			max_type = PCI_INTR_TYPE_INTX;
   2020 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2021 		}
   2022 	} else if (wm_disable_msix != 0) {
   2023 		max_type = PCI_INTR_TYPE_MSI;
   2024 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2025 	}
   2026 
   2027 alloc_retry:
   2028 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2029 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2030 		return;
   2031 	}
   2032 
   2033 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2034 		error = wm_setup_msix(sc);
   2035 		if (error) {
   2036 			pci_intr_release(pc, sc->sc_intrs,
   2037 			    counts[PCI_INTR_TYPE_MSIX]);
   2038 
   2039 			/* Setup for MSI: Disable MSI-X */
   2040 			max_type = PCI_INTR_TYPE_MSI;
   2041 			counts[PCI_INTR_TYPE_MSI] = 1;
   2042 			counts[PCI_INTR_TYPE_INTX] = 1;
   2043 			goto alloc_retry;
   2044 		}
   2045 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2046 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2047 		error = wm_setup_legacy(sc);
   2048 		if (error) {
   2049 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2050 			    counts[PCI_INTR_TYPE_MSI]);
   2051 
   2052 			/* The next try is for INTx: Disable MSI */
   2053 			max_type = PCI_INTR_TYPE_INTX;
   2054 			counts[PCI_INTR_TYPE_INTX] = 1;
   2055 			goto alloc_retry;
   2056 		}
   2057 	} else {
   2058 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2059 		error = wm_setup_legacy(sc);
   2060 		if (error) {
   2061 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2062 			    counts[PCI_INTR_TYPE_INTX]);
   2063 			return;
   2064 		}
   2065 	}
   2066 
   2067 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2068 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2069 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2070 	    WM_WORKQUEUE_FLAGS);
   2071 	if (error) {
   2072 		aprint_error_dev(sc->sc_dev,
   2073 		    "unable to create workqueue\n");
   2074 		goto out;
   2075 	}
   2076 
   2077 	/*
   2078 	 * Check the function ID (unit number of the chip).
   2079 	 */
   2080 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2081 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2082 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2083 	    || (sc->sc_type == WM_T_82580)
   2084 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2085 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2086 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2087 	else
   2088 		sc->sc_funcid = 0;
   2089 
   2090 	/*
   2091 	 * Determine a few things about the bus we're connected to.
   2092 	 */
   2093 	if (sc->sc_type < WM_T_82543) {
   2094 		/* We don't really know the bus characteristics here. */
   2095 		sc->sc_bus_speed = 33;
   2096 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2097 		/*
   2098 		 * CSA (Communication Streaming Architecture) is about as fast
   2099 		 * a 32-bit 66MHz PCI Bus.
   2100 		 */
   2101 		sc->sc_flags |= WM_F_CSA;
   2102 		sc->sc_bus_speed = 66;
   2103 		aprint_verbose_dev(sc->sc_dev,
   2104 		    "Communication Streaming Architecture\n");
   2105 		if (sc->sc_type == WM_T_82547) {
   2106 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2107 			callout_setfunc(&sc->sc_txfifo_ch,
   2108 			    wm_82547_txfifo_stall, sc);
   2109 			aprint_verbose_dev(sc->sc_dev,
   2110 			    "using 82547 Tx FIFO stall work-around\n");
   2111 		}
   2112 	} else if (sc->sc_type >= WM_T_82571) {
   2113 		sc->sc_flags |= WM_F_PCIE;
   2114 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2115 		    && (sc->sc_type != WM_T_ICH10)
   2116 		    && (sc->sc_type != WM_T_PCH)
   2117 		    && (sc->sc_type != WM_T_PCH2)
   2118 		    && (sc->sc_type != WM_T_PCH_LPT)
   2119 		    && (sc->sc_type != WM_T_PCH_SPT)
   2120 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2121 			/* ICH* and PCH* have no PCIe capability registers */
   2122 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2123 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2124 				NULL) == 0)
   2125 				aprint_error_dev(sc->sc_dev,
   2126 				    "unable to find PCIe capability\n");
   2127 		}
   2128 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2129 	} else {
   2130 		reg = CSR_READ(sc, WMREG_STATUS);
   2131 		if (reg & STATUS_BUS64)
   2132 			sc->sc_flags |= WM_F_BUS64;
   2133 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2134 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2135 
   2136 			sc->sc_flags |= WM_F_PCIX;
   2137 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2138 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2139 				aprint_error_dev(sc->sc_dev,
   2140 				    "unable to find PCIX capability\n");
   2141 			else if (sc->sc_type != WM_T_82545_3 &&
   2142 				 sc->sc_type != WM_T_82546_3) {
   2143 				/*
   2144 				 * Work around a problem caused by the BIOS
   2145 				 * setting the max memory read byte count
   2146 				 * incorrectly.
   2147 				 */
   2148 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2149 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2150 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2151 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2152 
   2153 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2154 				    PCIX_CMD_BYTECNT_SHIFT;
   2155 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2156 				    PCIX_STATUS_MAXB_SHIFT;
   2157 				if (bytecnt > maxb) {
   2158 					aprint_verbose_dev(sc->sc_dev,
   2159 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2160 					    512 << bytecnt, 512 << maxb);
   2161 					pcix_cmd = (pcix_cmd &
   2162 					    ~PCIX_CMD_BYTECNT_MASK) |
   2163 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2164 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2165 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2166 					    pcix_cmd);
   2167 				}
   2168 			}
   2169 		}
   2170 		/*
   2171 		 * The quad port adapter is special; it has a PCIX-PCIX
   2172 		 * bridge on the board, and can run the secondary bus at
   2173 		 * a higher speed.
   2174 		 */
   2175 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2176 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2177 								      : 66;
   2178 		} else if (sc->sc_flags & WM_F_PCIX) {
   2179 			switch (reg & STATUS_PCIXSPD_MASK) {
   2180 			case STATUS_PCIXSPD_50_66:
   2181 				sc->sc_bus_speed = 66;
   2182 				break;
   2183 			case STATUS_PCIXSPD_66_100:
   2184 				sc->sc_bus_speed = 100;
   2185 				break;
   2186 			case STATUS_PCIXSPD_100_133:
   2187 				sc->sc_bus_speed = 133;
   2188 				break;
   2189 			default:
   2190 				aprint_error_dev(sc->sc_dev,
   2191 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2192 				    reg & STATUS_PCIXSPD_MASK);
   2193 				sc->sc_bus_speed = 66;
   2194 				break;
   2195 			}
   2196 		} else
   2197 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2198 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2199 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2200 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2201 	}
   2202 
   2203 	/* clear interesting stat counters */
   2204 	CSR_READ(sc, WMREG_COLC);
   2205 	CSR_READ(sc, WMREG_RXERRC);
   2206 
   2207 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2208 	    || (sc->sc_type >= WM_T_ICH8))
   2209 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2210 	if (sc->sc_type >= WM_T_ICH8)
   2211 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2212 
   2213 	/* Set PHY, NVM mutex related stuff */
   2214 	switch (sc->sc_type) {
   2215 	case WM_T_82542_2_0:
   2216 	case WM_T_82542_2_1:
   2217 	case WM_T_82543:
   2218 	case WM_T_82544:
   2219 		/* Microwire */
   2220 		sc->nvm.read = wm_nvm_read_uwire;
   2221 		sc->sc_nvm_wordsize = 64;
   2222 		sc->sc_nvm_addrbits = 6;
   2223 		break;
   2224 	case WM_T_82540:
   2225 	case WM_T_82545:
   2226 	case WM_T_82545_3:
   2227 	case WM_T_82546:
   2228 	case WM_T_82546_3:
   2229 		/* Microwire */
   2230 		sc->nvm.read = wm_nvm_read_uwire;
   2231 		reg = CSR_READ(sc, WMREG_EECD);
   2232 		if (reg & EECD_EE_SIZE) {
   2233 			sc->sc_nvm_wordsize = 256;
   2234 			sc->sc_nvm_addrbits = 8;
   2235 		} else {
   2236 			sc->sc_nvm_wordsize = 64;
   2237 			sc->sc_nvm_addrbits = 6;
   2238 		}
   2239 		sc->sc_flags |= WM_F_LOCK_EECD;
   2240 		sc->nvm.acquire = wm_get_eecd;
   2241 		sc->nvm.release = wm_put_eecd;
   2242 		break;
   2243 	case WM_T_82541:
   2244 	case WM_T_82541_2:
   2245 	case WM_T_82547:
   2246 	case WM_T_82547_2:
   2247 		reg = CSR_READ(sc, WMREG_EECD);
   2248 		/*
   2249 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
   2250 		 * on 8254[17], so set flags and functios before calling it.
   2251 		 */
   2252 		sc->sc_flags |= WM_F_LOCK_EECD;
   2253 		sc->nvm.acquire = wm_get_eecd;
   2254 		sc->nvm.release = wm_put_eecd;
   2255 		if (reg & EECD_EE_TYPE) {
   2256 			/* SPI */
   2257 			sc->nvm.read = wm_nvm_read_spi;
   2258 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2259 			wm_nvm_set_addrbits_size_eecd(sc);
   2260 		} else {
   2261 			/* Microwire */
   2262 			sc->nvm.read = wm_nvm_read_uwire;
   2263 			if ((reg & EECD_EE_ABITS) != 0) {
   2264 				sc->sc_nvm_wordsize = 256;
   2265 				sc->sc_nvm_addrbits = 8;
   2266 			} else {
   2267 				sc->sc_nvm_wordsize = 64;
   2268 				sc->sc_nvm_addrbits = 6;
   2269 			}
   2270 		}
   2271 		break;
   2272 	case WM_T_82571:
   2273 	case WM_T_82572:
   2274 		/* SPI */
   2275 		sc->nvm.read = wm_nvm_read_eerd;
   2276 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2277 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2278 		wm_nvm_set_addrbits_size_eecd(sc);
   2279 		sc->phy.acquire = wm_get_swsm_semaphore;
   2280 		sc->phy.release = wm_put_swsm_semaphore;
   2281 		sc->nvm.acquire = wm_get_nvm_82571;
   2282 		sc->nvm.release = wm_put_nvm_82571;
   2283 		break;
   2284 	case WM_T_82573:
   2285 	case WM_T_82574:
   2286 	case WM_T_82583:
   2287 		sc->nvm.read = wm_nvm_read_eerd;
   2288 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2289 		if (sc->sc_type == WM_T_82573) {
   2290 			sc->phy.acquire = wm_get_swsm_semaphore;
   2291 			sc->phy.release = wm_put_swsm_semaphore;
   2292 			sc->nvm.acquire = wm_get_nvm_82571;
   2293 			sc->nvm.release = wm_put_nvm_82571;
   2294 		} else {
   2295 			/* Both PHY and NVM use the same semaphore. */
   2296 			sc->phy.acquire = sc->nvm.acquire
   2297 			    = wm_get_swfwhw_semaphore;
   2298 			sc->phy.release = sc->nvm.release
   2299 			    = wm_put_swfwhw_semaphore;
   2300 		}
   2301 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2302 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2303 			sc->sc_nvm_wordsize = 2048;
   2304 		} else {
   2305 			/* SPI */
   2306 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2307 			wm_nvm_set_addrbits_size_eecd(sc);
   2308 		}
   2309 		break;
   2310 	case WM_T_82575:
   2311 	case WM_T_82576:
   2312 	case WM_T_82580:
   2313 	case WM_T_I350:
   2314 	case WM_T_I354:
   2315 	case WM_T_80003:
   2316 		/* SPI */
   2317 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2318 		wm_nvm_set_addrbits_size_eecd(sc);
   2319 		if ((sc->sc_type == WM_T_80003)
   2320 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2321 			sc->nvm.read = wm_nvm_read_eerd;
   2322 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2323 		} else {
   2324 			sc->nvm.read = wm_nvm_read_spi;
   2325 			sc->sc_flags |= WM_F_LOCK_EECD;
   2326 		}
   2327 		sc->phy.acquire = wm_get_phy_82575;
   2328 		sc->phy.release = wm_put_phy_82575;
   2329 		sc->nvm.acquire = wm_get_nvm_80003;
   2330 		sc->nvm.release = wm_put_nvm_80003;
   2331 		break;
   2332 	case WM_T_ICH8:
   2333 	case WM_T_ICH9:
   2334 	case WM_T_ICH10:
   2335 	case WM_T_PCH:
   2336 	case WM_T_PCH2:
   2337 	case WM_T_PCH_LPT:
   2338 		sc->nvm.read = wm_nvm_read_ich8;
   2339 		/* FLASH */
   2340 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2341 		sc->sc_nvm_wordsize = 2048;
   2342 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2343 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2344 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2345 			aprint_error_dev(sc->sc_dev,
   2346 			    "can't map FLASH registers\n");
   2347 			goto out;
   2348 		}
   2349 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2350 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2351 		    ICH_FLASH_SECTOR_SIZE;
   2352 		sc->sc_ich8_flash_bank_size =
   2353 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2354 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2355 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2356 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2357 		sc->sc_flashreg_offset = 0;
   2358 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2359 		sc->phy.release = wm_put_swflag_ich8lan;
   2360 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2361 		sc->nvm.release = wm_put_nvm_ich8lan;
   2362 		break;
   2363 	case WM_T_PCH_SPT:
   2364 	case WM_T_PCH_CNP:
   2365 		sc->nvm.read = wm_nvm_read_spt;
   2366 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2367 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2368 		sc->sc_flasht = sc->sc_st;
   2369 		sc->sc_flashh = sc->sc_sh;
   2370 		sc->sc_ich8_flash_base = 0;
   2371 		sc->sc_nvm_wordsize =
   2372 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2373 		    * NVM_SIZE_MULTIPLIER;
   2374 		/* It is size in bytes, we want words */
   2375 		sc->sc_nvm_wordsize /= 2;
   2376 		/* Assume 2 banks */
   2377 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2378 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2379 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2380 		sc->phy.release = wm_put_swflag_ich8lan;
   2381 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2382 		sc->nvm.release = wm_put_nvm_ich8lan;
   2383 		break;
   2384 	case WM_T_I210:
   2385 	case WM_T_I211:
   2386 		/* Allow a single clear of the SW semaphore on I210 and newer*/
   2387 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2388 		if (wm_nvm_flash_presence_i210(sc)) {
   2389 			sc->nvm.read = wm_nvm_read_eerd;
   2390 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2391 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2392 			wm_nvm_set_addrbits_size_eecd(sc);
   2393 		} else {
   2394 			sc->nvm.read = wm_nvm_read_invm;
   2395 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2396 			sc->sc_nvm_wordsize = INVM_SIZE;
   2397 		}
   2398 		sc->phy.acquire = wm_get_phy_82575;
   2399 		sc->phy.release = wm_put_phy_82575;
   2400 		sc->nvm.acquire = wm_get_nvm_80003;
   2401 		sc->nvm.release = wm_put_nvm_80003;
   2402 		break;
   2403 	default:
   2404 		break;
   2405 	}
   2406 
   2407 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2408 	switch (sc->sc_type) {
   2409 	case WM_T_82571:
   2410 	case WM_T_82572:
   2411 		reg = CSR_READ(sc, WMREG_SWSM2);
   2412 		if ((reg & SWSM2_LOCK) == 0) {
   2413 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2414 			force_clear_smbi = true;
   2415 		} else
   2416 			force_clear_smbi = false;
   2417 		break;
   2418 	case WM_T_82573:
   2419 	case WM_T_82574:
   2420 	case WM_T_82583:
   2421 		force_clear_smbi = true;
   2422 		break;
   2423 	default:
   2424 		force_clear_smbi = false;
   2425 		break;
   2426 	}
   2427 	if (force_clear_smbi) {
   2428 		reg = CSR_READ(sc, WMREG_SWSM);
   2429 		if ((reg & SWSM_SMBI) != 0)
   2430 			aprint_error_dev(sc->sc_dev,
   2431 			    "Please update the Bootagent\n");
   2432 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2433 	}
   2434 
   2435 	/*
   2436 	 * Defer printing the EEPROM type until after verifying the checksum
   2437 	 * This allows the EEPROM type to be printed correctly in the case
   2438 	 * that no EEPROM is attached.
   2439 	 */
   2440 	/*
   2441 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2442 	 * this for later, so we can fail future reads from the EEPROM.
   2443 	 */
   2444 	if (wm_nvm_validate_checksum(sc)) {
   2445 		/*
   2446 		 * Read twice again because some PCI-e parts fail the
   2447 		 * first check due to the link being in sleep state.
   2448 		 */
   2449 		if (wm_nvm_validate_checksum(sc))
   2450 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2451 	}
   2452 
   2453 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2454 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2455 	else {
   2456 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2457 		    sc->sc_nvm_wordsize);
   2458 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2459 			aprint_verbose("iNVM");
   2460 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2461 			aprint_verbose("FLASH(HW)");
   2462 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2463 			aprint_verbose("FLASH");
   2464 		else {
   2465 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2466 				eetype = "SPI";
   2467 			else
   2468 				eetype = "MicroWire";
   2469 			aprint_verbose("(%d address bits) %s EEPROM",
   2470 			    sc->sc_nvm_addrbits, eetype);
   2471 		}
   2472 	}
   2473 	wm_nvm_version(sc);
   2474 	aprint_verbose("\n");
   2475 
   2476 	/*
   2477 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2478 	 * incorrect.
   2479 	 */
   2480 	wm_gmii_setup_phytype(sc, 0, 0);
   2481 
   2482 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2483 	switch (sc->sc_type) {
   2484 	case WM_T_ICH8:
   2485 	case WM_T_ICH9:
   2486 	case WM_T_ICH10:
   2487 	case WM_T_PCH:
   2488 	case WM_T_PCH2:
   2489 	case WM_T_PCH_LPT:
   2490 	case WM_T_PCH_SPT:
   2491 	case WM_T_PCH_CNP:
   2492 		apme_mask = WUC_APME;
   2493 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2494 		if ((eeprom_data & apme_mask) != 0)
   2495 			sc->sc_flags |= WM_F_WOL;
   2496 		break;
   2497 	default:
   2498 		break;
   2499 	}
   2500 
   2501 	/* Reset the chip to a known state. */
   2502 	wm_reset(sc);
   2503 
   2504 	/*
   2505 	 * Check for I21[01] PLL workaround.
   2506 	 *
   2507 	 * Three cases:
   2508 	 * a) Chip is I211.
   2509 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2510 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2511 	 */
   2512 	if (sc->sc_type == WM_T_I211)
   2513 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2514 	if (sc->sc_type == WM_T_I210) {
   2515 		if (!wm_nvm_flash_presence_i210(sc))
   2516 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2517 		else if ((sc->sc_nvm_ver_major < 3)
   2518 		    || ((sc->sc_nvm_ver_major == 3)
   2519 			&& (sc->sc_nvm_ver_minor < 25))) {
   2520 			aprint_verbose_dev(sc->sc_dev,
   2521 			    "ROM image version %d.%d is older than 3.25\n",
   2522 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2523 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2524 		}
   2525 	}
   2526 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2527 		wm_pll_workaround_i210(sc);
   2528 
   2529 	wm_get_wakeup(sc);
   2530 
   2531 	/* Non-AMT based hardware can now take control from firmware */
   2532 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2533 		wm_get_hw_control(sc);
   2534 
   2535 	/*
   2536 	 * Read the Ethernet address from the EEPROM, if not first found
   2537 	 * in device properties.
   2538 	 */
   2539 	ea = prop_dictionary_get(dict, "mac-address");
   2540 	if (ea != NULL) {
   2541 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2542 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2543 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2544 	} else {
   2545 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2546 			aprint_error_dev(sc->sc_dev,
   2547 			    "unable to read Ethernet address\n");
   2548 			goto out;
   2549 		}
   2550 	}
   2551 
   2552 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2553 	    ether_sprintf(enaddr));
   2554 
   2555 	/*
   2556 	 * Read the config info from the EEPROM, and set up various
   2557 	 * bits in the control registers based on their contents.
   2558 	 */
   2559 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2560 	if (pn != NULL) {
   2561 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2562 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2563 	} else {
   2564 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2565 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2566 			goto out;
   2567 		}
   2568 	}
   2569 
   2570 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2571 	if (pn != NULL) {
   2572 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2573 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2574 	} else {
   2575 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2576 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2577 			goto out;
   2578 		}
   2579 	}
   2580 
   2581 	/* check for WM_F_WOL */
   2582 	switch (sc->sc_type) {
   2583 	case WM_T_82542_2_0:
   2584 	case WM_T_82542_2_1:
   2585 	case WM_T_82543:
   2586 		/* dummy? */
   2587 		eeprom_data = 0;
   2588 		apme_mask = NVM_CFG3_APME;
   2589 		break;
   2590 	case WM_T_82544:
   2591 		apme_mask = NVM_CFG2_82544_APM_EN;
   2592 		eeprom_data = cfg2;
   2593 		break;
   2594 	case WM_T_82546:
   2595 	case WM_T_82546_3:
   2596 	case WM_T_82571:
   2597 	case WM_T_82572:
   2598 	case WM_T_82573:
   2599 	case WM_T_82574:
   2600 	case WM_T_82583:
   2601 	case WM_T_80003:
   2602 	case WM_T_82575:
   2603 	case WM_T_82576:
   2604 		apme_mask = NVM_CFG3_APME;
   2605 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2606 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2607 		break;
   2608 	case WM_T_82580:
   2609 	case WM_T_I350:
   2610 	case WM_T_I354:
   2611 	case WM_T_I210:
   2612 	case WM_T_I211:
   2613 		apme_mask = NVM_CFG3_APME;
   2614 		wm_nvm_read(sc,
   2615 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2616 		    1, &eeprom_data);
   2617 		break;
   2618 	case WM_T_ICH8:
   2619 	case WM_T_ICH9:
   2620 	case WM_T_ICH10:
   2621 	case WM_T_PCH:
   2622 	case WM_T_PCH2:
   2623 	case WM_T_PCH_LPT:
   2624 	case WM_T_PCH_SPT:
   2625 	case WM_T_PCH_CNP:
   2626 		/* Already checked before wm_reset () */
   2627 		apme_mask = eeprom_data = 0;
   2628 		break;
   2629 	default: /* XXX 82540 */
   2630 		apme_mask = NVM_CFG3_APME;
   2631 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2632 		break;
   2633 	}
   2634 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2635 	if ((eeprom_data & apme_mask) != 0)
   2636 		sc->sc_flags |= WM_F_WOL;
   2637 
   2638 	/*
   2639 	 * We have the eeprom settings, now apply the special cases
   2640 	 * where the eeprom may be wrong or the board won't support
   2641 	 * wake on lan on a particular port
   2642 	 */
   2643 	switch (sc->sc_pcidevid) {
   2644 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2645 		sc->sc_flags &= ~WM_F_WOL;
   2646 		break;
   2647 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2648 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2649 		/* Wake events only supported on port A for dual fiber
   2650 		 * regardless of eeprom setting */
   2651 		if (sc->sc_funcid == 1)
   2652 			sc->sc_flags &= ~WM_F_WOL;
   2653 		break;
   2654 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2655 		/* If quad port adapter, disable WoL on all but port A */
   2656 		if (sc->sc_funcid != 0)
   2657 			sc->sc_flags &= ~WM_F_WOL;
   2658 		break;
   2659 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2660 		/* Wake events only supported on port A for dual fiber
   2661 		 * regardless of eeprom setting */
   2662 		if (sc->sc_funcid == 1)
   2663 			sc->sc_flags &= ~WM_F_WOL;
   2664 		break;
   2665 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2666 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2667 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2668 		/* If quad port adapter, disable WoL on all but port A */
   2669 		if (sc->sc_funcid != 0)
   2670 			sc->sc_flags &= ~WM_F_WOL;
   2671 		break;
   2672 	}
   2673 
   2674 	if (sc->sc_type >= WM_T_82575) {
   2675 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2676 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2677 			    nvmword);
   2678 			if ((sc->sc_type == WM_T_82575) ||
   2679 			    (sc->sc_type == WM_T_82576)) {
   2680 				/* Check NVM for autonegotiation */
   2681 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2682 				    != 0)
   2683 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2684 			}
   2685 			if ((sc->sc_type == WM_T_82575) ||
   2686 			    (sc->sc_type == WM_T_I350)) {
   2687 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2688 					sc->sc_flags |= WM_F_MAS;
   2689 			}
   2690 		}
   2691 	}
   2692 
   2693 	/*
   2694 	 * XXX need special handling for some multiple port cards
   2695 	 * to disable a paticular port.
   2696 	 */
   2697 
   2698 	if (sc->sc_type >= WM_T_82544) {
   2699 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2700 		if (pn != NULL) {
   2701 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2702 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2703 		} else {
   2704 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2705 				aprint_error_dev(sc->sc_dev,
   2706 				    "unable to read SWDPIN\n");
   2707 				goto out;
   2708 			}
   2709 		}
   2710 	}
   2711 
   2712 	if (cfg1 & NVM_CFG1_ILOS)
   2713 		sc->sc_ctrl |= CTRL_ILOS;
   2714 
   2715 	/*
   2716 	 * XXX
   2717 	 * This code isn't correct because pin 2 and 3 are located
   2718 	 * in different position on newer chips. Check all datasheet.
   2719 	 *
   2720 	 * Until resolve this problem, check if a chip < 82580
   2721 	 */
   2722 	if (sc->sc_type <= WM_T_82580) {
   2723 		if (sc->sc_type >= WM_T_82544) {
   2724 			sc->sc_ctrl |=
   2725 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2726 			    CTRL_SWDPIO_SHIFT;
   2727 			sc->sc_ctrl |=
   2728 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2729 			    CTRL_SWDPINS_SHIFT;
   2730 		} else {
   2731 			sc->sc_ctrl |=
   2732 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2733 			    CTRL_SWDPIO_SHIFT;
   2734 		}
   2735 	}
   2736 
   2737 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2738 		wm_nvm_read(sc,
   2739 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2740 		    1, &nvmword);
   2741 		if (nvmword & NVM_CFG3_ILOS)
   2742 			sc->sc_ctrl |= CTRL_ILOS;
   2743 	}
   2744 
   2745 #if 0
   2746 	if (sc->sc_type >= WM_T_82544) {
   2747 		if (cfg1 & NVM_CFG1_IPS0)
   2748 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2749 		if (cfg1 & NVM_CFG1_IPS1)
   2750 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2751 		sc->sc_ctrl_ext |=
   2752 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2753 		    CTRL_EXT_SWDPIO_SHIFT;
   2754 		sc->sc_ctrl_ext |=
   2755 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2756 		    CTRL_EXT_SWDPINS_SHIFT;
   2757 	} else {
   2758 		sc->sc_ctrl_ext |=
   2759 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2760 		    CTRL_EXT_SWDPIO_SHIFT;
   2761 	}
   2762 #endif
   2763 
   2764 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2765 #if 0
   2766 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2767 #endif
   2768 
   2769 	if (sc->sc_type == WM_T_PCH) {
   2770 		uint16_t val;
   2771 
   2772 		/* Save the NVM K1 bit setting */
   2773 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2774 
   2775 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2776 			sc->sc_nvm_k1_enabled = 1;
   2777 		else
   2778 			sc->sc_nvm_k1_enabled = 0;
   2779 	}
   2780 
   2781 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2782 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2783 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2784 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2785 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2786 	    || sc->sc_type == WM_T_82573
   2787 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2788 		/* Copper only */
   2789 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2790 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
   2791 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
   2792 	    || (sc->sc_type ==WM_T_I211)) {
   2793 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2794 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2795 		switch (link_mode) {
   2796 		case CTRL_EXT_LINK_MODE_1000KX:
   2797 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2798 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2799 			break;
   2800 		case CTRL_EXT_LINK_MODE_SGMII:
   2801 			if (wm_sgmii_uses_mdio(sc)) {
   2802 				aprint_normal_dev(sc->sc_dev,
   2803 				    "SGMII(MDIO)\n");
   2804 				sc->sc_flags |= WM_F_SGMII;
   2805 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2806 				break;
   2807 			}
   2808 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2809 			/*FALLTHROUGH*/
   2810 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2811 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2812 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2813 				if (link_mode
   2814 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2815 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2816 					sc->sc_flags |= WM_F_SGMII;
   2817 					aprint_verbose_dev(sc->sc_dev,
   2818 					    "SGMII\n");
   2819 				} else {
   2820 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2821 					aprint_verbose_dev(sc->sc_dev,
   2822 					    "SERDES\n");
   2823 				}
   2824 				break;
   2825 			}
   2826 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2827 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2828 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2829 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2830 				sc->sc_flags |= WM_F_SGMII;
   2831 			}
   2832 			/* Do not change link mode for 100BaseFX */
   2833 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2834 				break;
   2835 
   2836 			/* Change current link mode setting */
   2837 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2838 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2839 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2840 			else
   2841 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2842 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2843 			break;
   2844 		case CTRL_EXT_LINK_MODE_GMII:
   2845 		default:
   2846 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2847 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2848 			break;
   2849 		}
   2850 
   2851 		reg &= ~CTRL_EXT_I2C_ENA;
   2852 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2853 			reg |= CTRL_EXT_I2C_ENA;
   2854 		else
   2855 			reg &= ~CTRL_EXT_I2C_ENA;
   2856 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2857 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2858 			wm_gmii_setup_phytype(sc, 0, 0);
   2859 			wm_reset_mdicnfg_82580(sc);
   2860 		}
   2861 	} else if (sc->sc_type < WM_T_82543 ||
   2862 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2863 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2864 			aprint_error_dev(sc->sc_dev,
   2865 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2866 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2867 		}
   2868 	} else {
   2869 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2870 			aprint_error_dev(sc->sc_dev,
   2871 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2872 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2873 		}
   2874 	}
   2875 
   2876 	if (sc->sc_type >= WM_T_PCH2)
   2877 		sc->sc_flags |= WM_F_EEE;
   2878 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2879 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2880 		/* XXX: Need special handling for I354. (not yet) */
   2881 		if (sc->sc_type != WM_T_I354)
   2882 			sc->sc_flags |= WM_F_EEE;
   2883 	}
   2884 
   2885 	/* Set device properties (macflags) */
   2886 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2887 
   2888 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2889 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2890 
   2891 #ifdef WM_MPSAFE
   2892 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2893 #else
   2894 	sc->sc_core_lock = NULL;
   2895 #endif
   2896 
   2897 	/* Initialize the media structures accordingly. */
   2898 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2899 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2900 	else
   2901 		wm_tbi_mediainit(sc); /* All others */
   2902 
   2903 	ifp = &sc->sc_ethercom.ec_if;
   2904 	xname = device_xname(sc->sc_dev);
   2905 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2906 	ifp->if_softc = sc;
   2907 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2908 #ifdef WM_MPSAFE
   2909 	ifp->if_extflags = IFEF_MPSAFE;
   2910 #endif
   2911 	ifp->if_ioctl = wm_ioctl;
   2912 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2913 		ifp->if_start = wm_nq_start;
   2914 		/*
   2915 		 * When the number of CPUs is one and the controller can use
   2916 		 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
   2917 		 * That is, wm(4) use two interrupts, one is used for Tx/Rx
   2918 		 * and the other is used for link status changing.
   2919 		 * In this situation, wm_nq_transmit() is disadvantageous
   2920 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2921 		 */
   2922 		if (wm_is_using_multiqueue(sc))
   2923 			ifp->if_transmit = wm_nq_transmit;
   2924 	} else {
   2925 		ifp->if_start = wm_start;
   2926 		/*
   2927 		 * wm_transmit() has the same disadvantage as wm_transmit().
   2928 		 */
   2929 		if (wm_is_using_multiqueue(sc))
   2930 			ifp->if_transmit = wm_transmit;
   2931 	}
   2932 	/* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */
   2933 	ifp->if_init = wm_init;
   2934 	ifp->if_stop = wm_stop;
   2935 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2936 	IFQ_SET_READY(&ifp->if_snd);
   2937 
   2938 	/* Check for jumbo frame */
   2939 	switch (sc->sc_type) {
   2940 	case WM_T_82573:
   2941 		/* XXX limited to 9234 if ASPM is disabled */
   2942 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2943 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2944 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2945 		break;
   2946 	case WM_T_82571:
   2947 	case WM_T_82572:
   2948 	case WM_T_82574:
   2949 	case WM_T_82583:
   2950 	case WM_T_82575:
   2951 	case WM_T_82576:
   2952 	case WM_T_82580:
   2953 	case WM_T_I350:
   2954 	case WM_T_I354:
   2955 	case WM_T_I210:
   2956 	case WM_T_I211:
   2957 	case WM_T_80003:
   2958 	case WM_T_ICH9:
   2959 	case WM_T_ICH10:
   2960 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2961 	case WM_T_PCH_LPT:
   2962 	case WM_T_PCH_SPT:
   2963 	case WM_T_PCH_CNP:
   2964 		/* XXX limited to 9234 */
   2965 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2966 		break;
   2967 	case WM_T_PCH:
   2968 		/* XXX limited to 4096 */
   2969 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2970 		break;
   2971 	case WM_T_82542_2_0:
   2972 	case WM_T_82542_2_1:
   2973 	case WM_T_ICH8:
   2974 		/* No support for jumbo frame */
   2975 		break;
   2976 	default:
   2977 		/* ETHER_MAX_LEN_JUMBO */
   2978 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2979 		break;
   2980 	}
   2981 
   2982 	/* If we're a i82543 or greater, we can support VLANs. */
   2983 	if (sc->sc_type >= WM_T_82543) {
   2984 		sc->sc_ethercom.ec_capabilities |=
   2985 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2986 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2987 	}
   2988 
   2989 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2990 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2991 
   2992 	/*
   2993 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   2994 	 * on i82543 and later.
   2995 	 */
   2996 	if (sc->sc_type >= WM_T_82543) {
   2997 		ifp->if_capabilities |=
   2998 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2999 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3000 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3001 		    IFCAP_CSUM_TCPv6_Tx |
   3002 		    IFCAP_CSUM_UDPv6_Tx;
   3003 	}
   3004 
   3005 	/*
   3006 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3007 	 *
   3008 	 *	82541GI (8086:1076) ... no
   3009 	 *	82572EI (8086:10b9) ... yes
   3010 	 */
   3011 	if (sc->sc_type >= WM_T_82571) {
   3012 		ifp->if_capabilities |=
   3013 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3014 	}
   3015 
   3016 	/*
   3017 	 * If we're a i82544 or greater (except i82547), we can do
   3018 	 * TCP segmentation offload.
   3019 	 */
   3020 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3021 		ifp->if_capabilities |= IFCAP_TSOv4;
   3022 	}
   3023 
   3024 	if (sc->sc_type >= WM_T_82571) {
   3025 		ifp->if_capabilities |= IFCAP_TSOv6;
   3026 	}
   3027 
   3028 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3029 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3030 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3031 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3032 
   3033 	/* Attach the interface. */
   3034 	error = if_initialize(ifp);
   3035 	if (error != 0) {
   3036 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3037 		    error);
   3038 		return; /* Error */
   3039 	}
   3040 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3041 	ether_ifattach(ifp, enaddr);
   3042 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3043 	if_register(ifp);
   3044 
   3045 #ifdef WM_EVENT_COUNTERS
   3046 	/* Attach event counters. */
   3047 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3048 	    NULL, xname, "linkintr");
   3049 
   3050 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3051 	    NULL, xname, "tx_xoff");
   3052 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3053 	    NULL, xname, "tx_xon");
   3054 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3055 	    NULL, xname, "rx_xoff");
   3056 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3057 	    NULL, xname, "rx_xon");
   3058 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3059 	    NULL, xname, "rx_macctl");
   3060 #endif /* WM_EVENT_COUNTERS */
   3061 
   3062 	sc->sc_txrx_use_workqueue = false;
   3063 
   3064 	wm_init_sysctls(sc);
   3065 
   3066 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3067 		pmf_class_network_register(self, ifp);
   3068 	else
   3069 		aprint_error_dev(self, "couldn't establish power handler\n");
   3070 
   3071 	sc->sc_flags |= WM_F_ATTACHED;
   3072 out:
   3073 	return;
   3074 }
   3075 
   3076 /* The detach function (ca_detach) */
   3077 static int
   3078 wm_detach(device_t self, int flags __unused)
   3079 {
   3080 	struct wm_softc *sc = device_private(self);
   3081 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3082 	int i;
   3083 
   3084 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3085 		return 0;
   3086 
   3087 	/* Stop the interface. Callouts are stopped in it. */
   3088 	wm_stop(ifp, 1);
   3089 
   3090 	pmf_device_deregister(self);
   3091 
   3092 	sysctl_teardown(&sc->sc_sysctllog);
   3093 
   3094 #ifdef WM_EVENT_COUNTERS
   3095 	evcnt_detach(&sc->sc_ev_linkintr);
   3096 
   3097 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3098 	evcnt_detach(&sc->sc_ev_tx_xon);
   3099 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3100 	evcnt_detach(&sc->sc_ev_rx_xon);
   3101 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3102 #endif /* WM_EVENT_COUNTERS */
   3103 
   3104 	/* Tell the firmware about the release */
   3105 	WM_CORE_LOCK(sc);
   3106 	wm_release_manageability(sc);
   3107 	wm_release_hw_control(sc);
   3108 	wm_enable_wakeup(sc);
   3109 	WM_CORE_UNLOCK(sc);
   3110 
   3111 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3112 
   3113 	ether_ifdetach(ifp);
   3114 	if_detach(ifp);
   3115 	if_percpuq_destroy(sc->sc_ipq);
   3116 
   3117 	/* Delete all remaining media. */
   3118 	ifmedia_fini(&sc->sc_mii.mii_media);
   3119 
   3120 	/* Unload RX dmamaps and free mbufs */
   3121 	for (i = 0; i < sc->sc_nqueues; i++) {
   3122 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3123 		mutex_enter(rxq->rxq_lock);
   3124 		wm_rxdrain(rxq);
   3125 		mutex_exit(rxq->rxq_lock);
   3126 	}
   3127 	/* Must unlock here */
   3128 
   3129 	/* Disestablish the interrupt handler */
   3130 	for (i = 0; i < sc->sc_nintrs; i++) {
   3131 		if (sc->sc_ihs[i] != NULL) {
   3132 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3133 			sc->sc_ihs[i] = NULL;
   3134 		}
   3135 	}
   3136 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3137 
   3138 	/* wm_stop() ensure workqueue is stopped. */
   3139 	workqueue_destroy(sc->sc_queue_wq);
   3140 
   3141 	for (i = 0; i < sc->sc_nqueues; i++)
   3142 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3143 
   3144 	wm_free_txrx_queues(sc);
   3145 
   3146 	/* Unmap the registers */
   3147 	if (sc->sc_ss) {
   3148 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3149 		sc->sc_ss = 0;
   3150 	}
   3151 	if (sc->sc_ios) {
   3152 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3153 		sc->sc_ios = 0;
   3154 	}
   3155 	if (sc->sc_flashs) {
   3156 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3157 		sc->sc_flashs = 0;
   3158 	}
   3159 
   3160 	if (sc->sc_core_lock)
   3161 		mutex_obj_free(sc->sc_core_lock);
   3162 	if (sc->sc_ich_phymtx)
   3163 		mutex_obj_free(sc->sc_ich_phymtx);
   3164 	if (sc->sc_ich_nvmmtx)
   3165 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3166 
   3167 	return 0;
   3168 }
   3169 
   3170 static bool
   3171 wm_suspend(device_t self, const pmf_qual_t *qual)
   3172 {
   3173 	struct wm_softc *sc = device_private(self);
   3174 
   3175 	wm_release_manageability(sc);
   3176 	wm_release_hw_control(sc);
   3177 	wm_enable_wakeup(sc);
   3178 
   3179 	return true;
   3180 }
   3181 
   3182 static bool
   3183 wm_resume(device_t self, const pmf_qual_t *qual)
   3184 {
   3185 	struct wm_softc *sc = device_private(self);
   3186 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3187 	pcireg_t reg;
   3188 	char buf[256];
   3189 
   3190 	reg = CSR_READ(sc, WMREG_WUS);
   3191 	if (reg != 0) {
   3192 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3193 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3194 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3195 	}
   3196 
   3197 	if (sc->sc_type >= WM_T_PCH2)
   3198 		wm_resume_workarounds_pchlan(sc);
   3199 	if ((ifp->if_flags & IFF_UP) == 0) {
   3200 		wm_reset(sc);
   3201 		/* Non-AMT based hardware can now take control from firmware */
   3202 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3203 			wm_get_hw_control(sc);
   3204 		wm_init_manageability(sc);
   3205 	} else {
   3206 		/*
   3207 		 * We called pmf_class_network_register(), so if_init() is
   3208 		 * automatically called when IFF_UP. wm_reset(),
   3209 		 * wm_get_hw_control() and wm_init_manageability() are called
   3210 		 * via wm_init().
   3211 		 */
   3212 	}
   3213 
   3214 	return true;
   3215 }
   3216 
   3217 /*
   3218  * wm_watchdog:		[ifnet interface function]
   3219  *
   3220  *	Watchdog timer handler.
   3221  */
   3222 static void
   3223 wm_watchdog(struct ifnet *ifp)
   3224 {
   3225 	int qid;
   3226 	struct wm_softc *sc = ifp->if_softc;
   3227 	uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
   3228 
   3229 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3230 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3231 
   3232 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3233 	}
   3234 
   3235 	/* IF any of queues hanged up, reset the interface. */
   3236 	if (hang_queue != 0) {
   3237 		(void)wm_init(ifp);
   3238 
   3239 		/*
   3240 		 * There are still some upper layer processing which call
   3241 		 * ifp->if_start(). e.g. ALTQ or one CPU system
   3242 		 */
   3243 		/* Try to get more packets going. */
   3244 		ifp->if_start(ifp);
   3245 	}
   3246 }
   3247 
   3248 
   3249 static void
   3250 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3251 {
   3252 
   3253 	mutex_enter(txq->txq_lock);
   3254 	if (txq->txq_sending &&
   3255 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3256 		wm_watchdog_txq_locked(ifp, txq, hang);
   3257 
   3258 	mutex_exit(txq->txq_lock);
   3259 }
   3260 
   3261 static void
   3262 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3263     uint16_t *hang)
   3264 {
   3265 	struct wm_softc *sc = ifp->if_softc;
   3266 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3267 
   3268 	KASSERT(mutex_owned(txq->txq_lock));
   3269 
   3270 	/*
   3271 	 * Since we're using delayed interrupts, sweep up
   3272 	 * before we report an error.
   3273 	 */
   3274 	wm_txeof(txq, UINT_MAX);
   3275 
   3276 	if (txq->txq_sending)
   3277 		*hang |= __BIT(wmq->wmq_id);
   3278 
   3279 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3280 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3281 		    device_xname(sc->sc_dev));
   3282 	} else {
   3283 #ifdef WM_DEBUG
   3284 		int i, j;
   3285 		struct wm_txsoft *txs;
   3286 #endif
   3287 		log(LOG_ERR,
   3288 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3289 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3290 		    txq->txq_next);
   3291 		if_statinc(ifp, if_oerrors);
   3292 #ifdef WM_DEBUG
   3293 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3294 		    i = WM_NEXTTXS(txq, i)) {
   3295 			txs = &txq->txq_soft[i];
   3296 			printf("txs %d tx %d -> %d\n",
   3297 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3298 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3299 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3300 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3301 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3302 					printf("\t %#08x%08x\n",
   3303 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3304 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3305 				} else {
   3306 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3307 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3308 					    txq->txq_descs[j].wtx_addr.wa_low);
   3309 					printf("\t %#04x%02x%02x%08x\n",
   3310 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3311 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3312 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3313 					    txq->txq_descs[j].wtx_cmdlen);
   3314 				}
   3315 				if (j == txs->txs_lastdesc)
   3316 					break;
   3317 			}
   3318 		}
   3319 #endif
   3320 	}
   3321 }
   3322 
   3323 /*
   3324  * wm_tick:
   3325  *
   3326  *	One second timer, used to check link status, sweep up
   3327  *	completed transmit jobs, etc.
   3328  */
   3329 static void
   3330 wm_tick(void *arg)
   3331 {
   3332 	struct wm_softc *sc = arg;
   3333 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3334 #ifndef WM_MPSAFE
   3335 	int s = splnet();
   3336 #endif
   3337 
   3338 	WM_CORE_LOCK(sc);
   3339 
   3340 	if (sc->sc_core_stopping) {
   3341 		WM_CORE_UNLOCK(sc);
   3342 #ifndef WM_MPSAFE
   3343 		splx(s);
   3344 #endif
   3345 		return;
   3346 	}
   3347 
   3348 	if (sc->sc_type >= WM_T_82542_2_1) {
   3349 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3350 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3351 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3352 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3353 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3354 	}
   3355 
   3356 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3357 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3358 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3359 	    + CSR_READ(sc, WMREG_CRCERRS)
   3360 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3361 	    + CSR_READ(sc, WMREG_SYMERRC)
   3362 	    + CSR_READ(sc, WMREG_RXERRC)
   3363 	    + CSR_READ(sc, WMREG_SEC)
   3364 	    + CSR_READ(sc, WMREG_CEXTERR)
   3365 	    + CSR_READ(sc, WMREG_RLEC));
   3366 	/*
   3367 	 * WMREG_RNBC is incremented when there is no available buffers in host
   3368 	 * memory. It does not mean the number of dropped packet. Because
   3369 	 * ethernet controller can receive packets in such case if there is
   3370 	 * space in phy's FIFO.
   3371 	 *
   3372 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
   3373 	 * own EVCNT instead of if_iqdrops.
   3374 	 */
   3375 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3376 	IF_STAT_PUTREF(ifp);
   3377 
   3378 	if (sc->sc_flags & WM_F_HAS_MII)
   3379 		mii_tick(&sc->sc_mii);
   3380 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3381 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3382 		wm_serdes_tick(sc);
   3383 	else
   3384 		wm_tbi_tick(sc);
   3385 
   3386 	WM_CORE_UNLOCK(sc);
   3387 
   3388 	wm_watchdog(ifp);
   3389 
   3390 	callout_schedule(&sc->sc_tick_ch, hz);
   3391 }
   3392 
   3393 static int
   3394 wm_ifflags_cb(struct ethercom *ec)
   3395 {
   3396 	struct ifnet *ifp = &ec->ec_if;
   3397 	struct wm_softc *sc = ifp->if_softc;
   3398 	u_short iffchange;
   3399 	int ecchange;
   3400 	bool needreset = false;
   3401 	int rc = 0;
   3402 
   3403 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3404 		device_xname(sc->sc_dev), __func__));
   3405 
   3406 	WM_CORE_LOCK(sc);
   3407 
   3408 	/*
   3409 	 * Check for if_flags.
   3410 	 * Main usage is to prevent linkdown when opening bpf.
   3411 	 */
   3412 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3413 	sc->sc_if_flags = ifp->if_flags;
   3414 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3415 		needreset = true;
   3416 		goto ec;
   3417 	}
   3418 
   3419 	/* iff related updates */
   3420 	if ((iffchange & IFF_PROMISC) != 0)
   3421 		wm_set_filter(sc);
   3422 
   3423 	wm_set_vlan(sc);
   3424 
   3425 ec:
   3426 	/* Check for ec_capenable. */
   3427 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3428 	sc->sc_ec_capenable = ec->ec_capenable;
   3429 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3430 		needreset = true;
   3431 		goto out;
   3432 	}
   3433 
   3434 	/* ec related updates */
   3435 	wm_set_eee(sc);
   3436 
   3437 out:
   3438 	if (needreset)
   3439 		rc = ENETRESET;
   3440 	WM_CORE_UNLOCK(sc);
   3441 
   3442 	return rc;
   3443 }
   3444 
   3445 /*
   3446  * wm_ioctl:		[ifnet interface function]
   3447  *
   3448  *	Handle control requests from the operator.
   3449  */
   3450 static int
   3451 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3452 {
   3453 	struct wm_softc *sc = ifp->if_softc;
   3454 	struct ifreq *ifr = (struct ifreq *)data;
   3455 	struct ifaddr *ifa = (struct ifaddr *)data;
   3456 	struct sockaddr_dl *sdl;
   3457 	int s, error;
   3458 
   3459 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3460 		device_xname(sc->sc_dev), __func__));
   3461 
   3462 #ifndef WM_MPSAFE
   3463 	s = splnet();
   3464 #endif
   3465 	switch (cmd) {
   3466 	case SIOCSIFMEDIA:
   3467 		WM_CORE_LOCK(sc);
   3468 		/* Flow control requires full-duplex mode. */
   3469 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3470 		    (ifr->ifr_media & IFM_FDX) == 0)
   3471 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3472 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3473 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3474 				/* We can do both TXPAUSE and RXPAUSE. */
   3475 				ifr->ifr_media |=
   3476 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3477 			}
   3478 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3479 		}
   3480 		WM_CORE_UNLOCK(sc);
   3481 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3482 		break;
   3483 	case SIOCINITIFADDR:
   3484 		WM_CORE_LOCK(sc);
   3485 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3486 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3487 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3488 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3489 			/* Unicast address is the first multicast entry */
   3490 			wm_set_filter(sc);
   3491 			error = 0;
   3492 			WM_CORE_UNLOCK(sc);
   3493 			break;
   3494 		}
   3495 		WM_CORE_UNLOCK(sc);
   3496 		/*FALLTHROUGH*/
   3497 	default:
   3498 #ifdef WM_MPSAFE
   3499 		s = splnet();
   3500 #endif
   3501 		/* It may call wm_start, so unlock here */
   3502 		error = ether_ioctl(ifp, cmd, data);
   3503 #ifdef WM_MPSAFE
   3504 		splx(s);
   3505 #endif
   3506 		if (error != ENETRESET)
   3507 			break;
   3508 
   3509 		error = 0;
   3510 
   3511 		if (cmd == SIOCSIFCAP)
   3512 			error = (*ifp->if_init)(ifp);
   3513 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3514 			;
   3515 		else if (ifp->if_flags & IFF_RUNNING) {
   3516 			/*
   3517 			 * Multicast list has changed; set the hardware filter
   3518 			 * accordingly.
   3519 			 */
   3520 			WM_CORE_LOCK(sc);
   3521 			wm_set_filter(sc);
   3522 			WM_CORE_UNLOCK(sc);
   3523 		}
   3524 		break;
   3525 	}
   3526 
   3527 #ifndef WM_MPSAFE
   3528 	splx(s);
   3529 #endif
   3530 	return error;
   3531 }
   3532 
   3533 /* MAC address related */
   3534 
   3535 /*
   3536  * Get the offset of MAC address and return it.
   3537  * If error occured, use offset 0.
   3538  */
   3539 static uint16_t
   3540 wm_check_alt_mac_addr(struct wm_softc *sc)
   3541 {
   3542 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3543 	uint16_t offset = NVM_OFF_MACADDR;
   3544 
   3545 	/* Try to read alternative MAC address pointer */
   3546 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3547 		return 0;
   3548 
   3549 	/* Check pointer if it's valid or not. */
   3550 	if ((offset == 0x0000) || (offset == 0xffff))
   3551 		return 0;
   3552 
   3553 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3554 	/*
   3555 	 * Check whether alternative MAC address is valid or not.
   3556 	 * Some cards have non 0xffff pointer but those don't use
   3557 	 * alternative MAC address in reality.
   3558 	 *
   3559 	 * Check whether the broadcast bit is set or not.
   3560 	 */
   3561 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3562 		if (((myea[0] & 0xff) & 0x01) == 0)
   3563 			return offset; /* Found */
   3564 
   3565 	/* Not found */
   3566 	return 0;
   3567 }
   3568 
   3569 static int
   3570 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3571 {
   3572 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3573 	uint16_t offset = NVM_OFF_MACADDR;
   3574 	int do_invert = 0;
   3575 
   3576 	switch (sc->sc_type) {
   3577 	case WM_T_82580:
   3578 	case WM_T_I350:
   3579 	case WM_T_I354:
   3580 		/* EEPROM Top Level Partitioning */
   3581 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3582 		break;
   3583 	case WM_T_82571:
   3584 	case WM_T_82575:
   3585 	case WM_T_82576:
   3586 	case WM_T_80003:
   3587 	case WM_T_I210:
   3588 	case WM_T_I211:
   3589 		offset = wm_check_alt_mac_addr(sc);
   3590 		if (offset == 0)
   3591 			if ((sc->sc_funcid & 0x01) == 1)
   3592 				do_invert = 1;
   3593 		break;
   3594 	default:
   3595 		if ((sc->sc_funcid & 0x01) == 1)
   3596 			do_invert = 1;
   3597 		break;
   3598 	}
   3599 
   3600 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3601 		goto bad;
   3602 
   3603 	enaddr[0] = myea[0] & 0xff;
   3604 	enaddr[1] = myea[0] >> 8;
   3605 	enaddr[2] = myea[1] & 0xff;
   3606 	enaddr[3] = myea[1] >> 8;
   3607 	enaddr[4] = myea[2] & 0xff;
   3608 	enaddr[5] = myea[2] >> 8;
   3609 
   3610 	/*
   3611 	 * Toggle the LSB of the MAC address on the second port
   3612 	 * of some dual port cards.
   3613 	 */
   3614 	if (do_invert != 0)
   3615 		enaddr[5] ^= 1;
   3616 
   3617 	return 0;
   3618 
   3619  bad:
   3620 	return -1;
   3621 }
   3622 
   3623 /*
   3624  * wm_set_ral:
   3625  *
   3626  *	Set an entery in the receive address list.
   3627  */
   3628 static void
   3629 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3630 {
   3631 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3632 	uint32_t wlock_mac;
   3633 	int rv;
   3634 
   3635 	if (enaddr != NULL) {
   3636 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3637 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3638 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3639 		ral_hi |= RAL_AV;
   3640 	} else {
   3641 		ral_lo = 0;
   3642 		ral_hi = 0;
   3643 	}
   3644 
   3645 	switch (sc->sc_type) {
   3646 	case WM_T_82542_2_0:
   3647 	case WM_T_82542_2_1:
   3648 	case WM_T_82543:
   3649 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3650 		CSR_WRITE_FLUSH(sc);
   3651 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3652 		CSR_WRITE_FLUSH(sc);
   3653 		break;
   3654 	case WM_T_PCH2:
   3655 	case WM_T_PCH_LPT:
   3656 	case WM_T_PCH_SPT:
   3657 	case WM_T_PCH_CNP:
   3658 		if (idx == 0) {
   3659 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3660 			CSR_WRITE_FLUSH(sc);
   3661 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3662 			CSR_WRITE_FLUSH(sc);
   3663 			return;
   3664 		}
   3665 		if (sc->sc_type != WM_T_PCH2) {
   3666 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3667 			    FWSM_WLOCK_MAC);
   3668 			addrl = WMREG_SHRAL(idx - 1);
   3669 			addrh = WMREG_SHRAH(idx - 1);
   3670 		} else {
   3671 			wlock_mac = 0;
   3672 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3673 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3674 		}
   3675 
   3676 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3677 			rv = wm_get_swflag_ich8lan(sc);
   3678 			if (rv != 0)
   3679 				return;
   3680 			CSR_WRITE(sc, addrl, ral_lo);
   3681 			CSR_WRITE_FLUSH(sc);
   3682 			CSR_WRITE(sc, addrh, ral_hi);
   3683 			CSR_WRITE_FLUSH(sc);
   3684 			wm_put_swflag_ich8lan(sc);
   3685 		}
   3686 
   3687 		break;
   3688 	default:
   3689 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3690 		CSR_WRITE_FLUSH(sc);
   3691 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3692 		CSR_WRITE_FLUSH(sc);
   3693 		break;
   3694 	}
   3695 }
   3696 
   3697 /*
   3698  * wm_mchash:
   3699  *
   3700  *	Compute the hash of the multicast address for the 4096-bit
   3701  *	multicast filter.
   3702  */
   3703 static uint32_t
   3704 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3705 {
   3706 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3707 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3708 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3709 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3710 	uint32_t hash;
   3711 
   3712 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3713 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3714 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3715 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3716 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3717 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3718 		return (hash & 0x3ff);
   3719 	}
   3720 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3721 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3722 
   3723 	return (hash & 0xfff);
   3724 }
   3725 
   3726 /*
   3727  *
   3728  *
   3729  */
   3730 static int
   3731 wm_rar_count(struct wm_softc *sc)
   3732 {
   3733 	int size;
   3734 
   3735 	switch (sc->sc_type) {
   3736 	case WM_T_ICH8:
   3737 		size = WM_RAL_TABSIZE_ICH8 -1;
   3738 		break;
   3739 	case WM_T_ICH9:
   3740 	case WM_T_ICH10:
   3741 	case WM_T_PCH:
   3742 		size = WM_RAL_TABSIZE_ICH8;
   3743 		break;
   3744 	case WM_T_PCH2:
   3745 		size = WM_RAL_TABSIZE_PCH2;
   3746 		break;
   3747 	case WM_T_PCH_LPT:
   3748 	case WM_T_PCH_SPT:
   3749 	case WM_T_PCH_CNP:
   3750 		size = WM_RAL_TABSIZE_PCH_LPT;
   3751 		break;
   3752 	case WM_T_82575:
   3753 	case WM_T_I210:
   3754 	case WM_T_I211:
   3755 		size = WM_RAL_TABSIZE_82575;
   3756 		break;
   3757 	case WM_T_82576:
   3758 	case WM_T_82580:
   3759 		size = WM_RAL_TABSIZE_82576;
   3760 		break;
   3761 	case WM_T_I350:
   3762 	case WM_T_I354:
   3763 		size = WM_RAL_TABSIZE_I350;
   3764 		break;
   3765 	default:
   3766 		size = WM_RAL_TABSIZE;
   3767 	}
   3768 
   3769 	return size;
   3770 }
   3771 
   3772 /*
   3773  * wm_set_filter:
   3774  *
   3775  *	Set up the receive filter.
   3776  */
   3777 static void
   3778 wm_set_filter(struct wm_softc *sc)
   3779 {
   3780 	struct ethercom *ec = &sc->sc_ethercom;
   3781 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3782 	struct ether_multi *enm;
   3783 	struct ether_multistep step;
   3784 	bus_addr_t mta_reg;
   3785 	uint32_t hash, reg, bit;
   3786 	int i, size, ralmax;
   3787 
   3788 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3789 		device_xname(sc->sc_dev), __func__));
   3790 
   3791 	if (sc->sc_type >= WM_T_82544)
   3792 		mta_reg = WMREG_CORDOVA_MTA;
   3793 	else
   3794 		mta_reg = WMREG_MTA;
   3795 
   3796 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3797 
   3798 	if (ifp->if_flags & IFF_BROADCAST)
   3799 		sc->sc_rctl |= RCTL_BAM;
   3800 	if (ifp->if_flags & IFF_PROMISC) {
   3801 		sc->sc_rctl |= RCTL_UPE;
   3802 		ETHER_LOCK(ec);
   3803 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3804 		ETHER_UNLOCK(ec);
   3805 		goto allmulti;
   3806 	}
   3807 
   3808 	/*
   3809 	 * Set the station address in the first RAL slot, and
   3810 	 * clear the remaining slots.
   3811 	 */
   3812 	size = wm_rar_count(sc);
   3813 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3814 
   3815 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3816 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3817 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3818 		switch (i) {
   3819 		case 0:
   3820 			/* We can use all entries */
   3821 			ralmax = size;
   3822 			break;
   3823 		case 1:
   3824 			/* Only RAR[0] */
   3825 			ralmax = 1;
   3826 			break;
   3827 		default:
   3828 			/* Available SHRA + RAR[0] */
   3829 			ralmax = i + 1;
   3830 		}
   3831 	} else
   3832 		ralmax = size;
   3833 	for (i = 1; i < size; i++) {
   3834 		if (i < ralmax)
   3835 			wm_set_ral(sc, NULL, i);
   3836 	}
   3837 
   3838 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3839 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3840 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3841 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3842 		size = WM_ICH8_MC_TABSIZE;
   3843 	else
   3844 		size = WM_MC_TABSIZE;
   3845 	/* Clear out the multicast table. */
   3846 	for (i = 0; i < size; i++) {
   3847 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3848 		CSR_WRITE_FLUSH(sc);
   3849 	}
   3850 
   3851 	ETHER_LOCK(ec);
   3852 	ETHER_FIRST_MULTI(step, ec, enm);
   3853 	while (enm != NULL) {
   3854 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3855 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3856 			ETHER_UNLOCK(ec);
   3857 			/*
   3858 			 * We must listen to a range of multicast addresses.
   3859 			 * For now, just accept all multicasts, rather than
   3860 			 * trying to set only those filter bits needed to match
   3861 			 * the range.  (At this time, the only use of address
   3862 			 * ranges is for IP multicast routing, for which the
   3863 			 * range is big enough to require all bits set.)
   3864 			 */
   3865 			goto allmulti;
   3866 		}
   3867 
   3868 		hash = wm_mchash(sc, enm->enm_addrlo);
   3869 
   3870 		reg = (hash >> 5);
   3871 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3872 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3873 		    || (sc->sc_type == WM_T_PCH2)
   3874 		    || (sc->sc_type == WM_T_PCH_LPT)
   3875 		    || (sc->sc_type == WM_T_PCH_SPT)
   3876 		    || (sc->sc_type == WM_T_PCH_CNP))
   3877 			reg &= 0x1f;
   3878 		else
   3879 			reg &= 0x7f;
   3880 		bit = hash & 0x1f;
   3881 
   3882 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3883 		hash |= 1U << bit;
   3884 
   3885 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3886 			/*
   3887 			 * 82544 Errata 9: Certain register cannot be written
   3888 			 * with particular alignments in PCI-X bus operation
   3889 			 * (FCAH, MTA and VFTA).
   3890 			 */
   3891 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3892 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3893 			CSR_WRITE_FLUSH(sc);
   3894 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3895 			CSR_WRITE_FLUSH(sc);
   3896 		} else {
   3897 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3898 			CSR_WRITE_FLUSH(sc);
   3899 		}
   3900 
   3901 		ETHER_NEXT_MULTI(step, enm);
   3902 	}
   3903 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3904 	ETHER_UNLOCK(ec);
   3905 
   3906 	goto setit;
   3907 
   3908  allmulti:
   3909 	sc->sc_rctl |= RCTL_MPE;
   3910 
   3911  setit:
   3912 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3913 }
   3914 
   3915 /* Reset and init related */
   3916 
   3917 static void
   3918 wm_set_vlan(struct wm_softc *sc)
   3919 {
   3920 
   3921 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3922 		device_xname(sc->sc_dev), __func__));
   3923 
   3924 	/* Deal with VLAN enables. */
   3925 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3926 		sc->sc_ctrl |= CTRL_VME;
   3927 	else
   3928 		sc->sc_ctrl &= ~CTRL_VME;
   3929 
   3930 	/* Write the control registers. */
   3931 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3932 }
   3933 
   3934 static void
   3935 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3936 {
   3937 	uint32_t gcr;
   3938 	pcireg_t ctrl2;
   3939 
   3940 	gcr = CSR_READ(sc, WMREG_GCR);
   3941 
   3942 	/* Only take action if timeout value is defaulted to 0 */
   3943 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3944 		goto out;
   3945 
   3946 	if ((gcr & GCR_CAP_VER2) == 0) {
   3947 		gcr |= GCR_CMPL_TMOUT_10MS;
   3948 		goto out;
   3949 	}
   3950 
   3951 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3952 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3953 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3954 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3955 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3956 
   3957 out:
   3958 	/* Disable completion timeout resend */
   3959 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3960 
   3961 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3962 }
   3963 
   3964 void
   3965 wm_get_auto_rd_done(struct wm_softc *sc)
   3966 {
   3967 	int i;
   3968 
   3969 	/* wait for eeprom to reload */
   3970 	switch (sc->sc_type) {
   3971 	case WM_T_82571:
   3972 	case WM_T_82572:
   3973 	case WM_T_82573:
   3974 	case WM_T_82574:
   3975 	case WM_T_82583:
   3976 	case WM_T_82575:
   3977 	case WM_T_82576:
   3978 	case WM_T_82580:
   3979 	case WM_T_I350:
   3980 	case WM_T_I354:
   3981 	case WM_T_I210:
   3982 	case WM_T_I211:
   3983 	case WM_T_80003:
   3984 	case WM_T_ICH8:
   3985 	case WM_T_ICH9:
   3986 		for (i = 0; i < 10; i++) {
   3987 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3988 				break;
   3989 			delay(1000);
   3990 		}
   3991 		if (i == 10) {
   3992 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3993 			    "complete\n", device_xname(sc->sc_dev));
   3994 		}
   3995 		break;
   3996 	default:
   3997 		break;
   3998 	}
   3999 }
   4000 
   4001 void
   4002 wm_lan_init_done(struct wm_softc *sc)
   4003 {
   4004 	uint32_t reg = 0;
   4005 	int i;
   4006 
   4007 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4008 		device_xname(sc->sc_dev), __func__));
   4009 
   4010 	/* Wait for eeprom to reload */
   4011 	switch (sc->sc_type) {
   4012 	case WM_T_ICH10:
   4013 	case WM_T_PCH:
   4014 	case WM_T_PCH2:
   4015 	case WM_T_PCH_LPT:
   4016 	case WM_T_PCH_SPT:
   4017 	case WM_T_PCH_CNP:
   4018 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4019 			reg = CSR_READ(sc, WMREG_STATUS);
   4020 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4021 				break;
   4022 			delay(100);
   4023 		}
   4024 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4025 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4026 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4027 		}
   4028 		break;
   4029 	default:
   4030 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4031 		    __func__);
   4032 		break;
   4033 	}
   4034 
   4035 	reg &= ~STATUS_LAN_INIT_DONE;
   4036 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4037 }
   4038 
   4039 void
   4040 wm_get_cfg_done(struct wm_softc *sc)
   4041 {
   4042 	int mask;
   4043 	uint32_t reg;
   4044 	int i;
   4045 
   4046 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4047 		device_xname(sc->sc_dev), __func__));
   4048 
   4049 	/* Wait for eeprom to reload */
   4050 	switch (sc->sc_type) {
   4051 	case WM_T_82542_2_0:
   4052 	case WM_T_82542_2_1:
   4053 		/* null */
   4054 		break;
   4055 	case WM_T_82543:
   4056 	case WM_T_82544:
   4057 	case WM_T_82540:
   4058 	case WM_T_82545:
   4059 	case WM_T_82545_3:
   4060 	case WM_T_82546:
   4061 	case WM_T_82546_3:
   4062 	case WM_T_82541:
   4063 	case WM_T_82541_2:
   4064 	case WM_T_82547:
   4065 	case WM_T_82547_2:
   4066 	case WM_T_82573:
   4067 	case WM_T_82574:
   4068 	case WM_T_82583:
   4069 		/* generic */
   4070 		delay(10*1000);
   4071 		break;
   4072 	case WM_T_80003:
   4073 	case WM_T_82571:
   4074 	case WM_T_82572:
   4075 	case WM_T_82575:
   4076 	case WM_T_82576:
   4077 	case WM_T_82580:
   4078 	case WM_T_I350:
   4079 	case WM_T_I354:
   4080 	case WM_T_I210:
   4081 	case WM_T_I211:
   4082 		if (sc->sc_type == WM_T_82571) {
   4083 			/* Only 82571 shares port 0 */
   4084 			mask = EEMNGCTL_CFGDONE_0;
   4085 		} else
   4086 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4087 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4088 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4089 				break;
   4090 			delay(1000);
   4091 		}
   4092 		if (i >= WM_PHY_CFG_TIMEOUT)
   4093 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4094 				device_xname(sc->sc_dev), __func__));
   4095 		break;
   4096 	case WM_T_ICH8:
   4097 	case WM_T_ICH9:
   4098 	case WM_T_ICH10:
   4099 	case WM_T_PCH:
   4100 	case WM_T_PCH2:
   4101 	case WM_T_PCH_LPT:
   4102 	case WM_T_PCH_SPT:
   4103 	case WM_T_PCH_CNP:
   4104 		delay(10*1000);
   4105 		if (sc->sc_type >= WM_T_ICH10)
   4106 			wm_lan_init_done(sc);
   4107 		else
   4108 			wm_get_auto_rd_done(sc);
   4109 
   4110 		/* Clear PHY Reset Asserted bit */
   4111 		reg = CSR_READ(sc, WMREG_STATUS);
   4112 		if ((reg & STATUS_PHYRA) != 0)
   4113 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4114 		break;
   4115 	default:
   4116 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4117 		    __func__);
   4118 		break;
   4119 	}
   4120 }
   4121 
   4122 int
   4123 wm_phy_post_reset(struct wm_softc *sc)
   4124 {
   4125 	device_t dev = sc->sc_dev;
   4126 	uint16_t reg;
   4127 	int rv = 0;
   4128 
   4129 	/* This function is only for ICH8 and newer. */
   4130 	if (sc->sc_type < WM_T_ICH8)
   4131 		return 0;
   4132 
   4133 	if (wm_phy_resetisblocked(sc)) {
   4134 		/* XXX */
   4135 		device_printf(dev, "PHY is blocked\n");
   4136 		return -1;
   4137 	}
   4138 
   4139 	/* Allow time for h/w to get to quiescent state after reset */
   4140 	delay(10*1000);
   4141 
   4142 	/* Perform any necessary post-reset workarounds */
   4143 	if (sc->sc_type == WM_T_PCH)
   4144 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4145 	else if (sc->sc_type == WM_T_PCH2)
   4146 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4147 	if (rv != 0)
   4148 		return rv;
   4149 
   4150 	/* Clear the host wakeup bit after lcd reset */
   4151 	if (sc->sc_type >= WM_T_PCH) {
   4152 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4153 		reg &= ~BM_WUC_HOST_WU_BIT;
   4154 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4155 	}
   4156 
   4157 	/* Configure the LCD with the extended configuration region in NVM */
   4158 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4159 		return rv;
   4160 
   4161 	/* Configure the LCD with the OEM bits in NVM */
   4162 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4163 
   4164 	if (sc->sc_type == WM_T_PCH2) {
   4165 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4166 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4167 			delay(10 * 1000);
   4168 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4169 		}
   4170 		/* Set EEE LPI Update Timer to 200usec */
   4171 		rv = sc->phy.acquire(sc);
   4172 		if (rv)
   4173 			return rv;
   4174 		rv = wm_write_emi_reg_locked(dev,
   4175 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4176 		sc->phy.release(sc);
   4177 	}
   4178 
   4179 	return rv;
   4180 }
   4181 
   4182 /* Only for PCH and newer */
   4183 static int
   4184 wm_write_smbus_addr(struct wm_softc *sc)
   4185 {
   4186 	uint32_t strap, freq;
   4187 	uint16_t phy_data;
   4188 	int rv;
   4189 
   4190 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4191 		device_xname(sc->sc_dev), __func__));
   4192 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4193 
   4194 	strap = CSR_READ(sc, WMREG_STRAP);
   4195 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4196 
   4197 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4198 	if (rv != 0)
   4199 		return -1;
   4200 
   4201 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4202 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4203 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4204 
   4205 	if (sc->sc_phytype == WMPHY_I217) {
   4206 		/* Restore SMBus frequency */
   4207 		if (freq --) {
   4208 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4209 			    | HV_SMB_ADDR_FREQ_HIGH);
   4210 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4211 			    HV_SMB_ADDR_FREQ_LOW);
   4212 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4213 			    HV_SMB_ADDR_FREQ_HIGH);
   4214 		} else
   4215 			DPRINTF(WM_DEBUG_INIT,
   4216 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4217 				device_xname(sc->sc_dev), __func__));
   4218 	}
   4219 
   4220 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4221 	    phy_data);
   4222 }
   4223 
   4224 static int
   4225 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4226 {
   4227 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4228 	uint16_t phy_page = 0;
   4229 	int rv = 0;
   4230 
   4231 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4232 		device_xname(sc->sc_dev), __func__));
   4233 
   4234 	switch (sc->sc_type) {
   4235 	case WM_T_ICH8:
   4236 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4237 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4238 			return 0;
   4239 
   4240 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4241 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4242 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4243 			break;
   4244 		}
   4245 		/* FALLTHROUGH */
   4246 	case WM_T_PCH:
   4247 	case WM_T_PCH2:
   4248 	case WM_T_PCH_LPT:
   4249 	case WM_T_PCH_SPT:
   4250 	case WM_T_PCH_CNP:
   4251 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4252 		break;
   4253 	default:
   4254 		return 0;
   4255 	}
   4256 
   4257 	if ((rv = sc->phy.acquire(sc)) != 0)
   4258 		return rv;
   4259 
   4260 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4261 	if ((reg & sw_cfg_mask) == 0)
   4262 		goto release;
   4263 
   4264 	/*
   4265 	 * Make sure HW does not configure LCD from PHY extended configuration
   4266 	 * before SW configuration
   4267 	 */
   4268 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4269 	if ((sc->sc_type < WM_T_PCH2)
   4270 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4271 		goto release;
   4272 
   4273 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4274 		device_xname(sc->sc_dev), __func__));
   4275 	/* word_addr is in DWORD */
   4276 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4277 
   4278 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4279 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4280 	if (cnf_size == 0)
   4281 		goto release;
   4282 
   4283 	if (((sc->sc_type == WM_T_PCH)
   4284 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4285 	    || (sc->sc_type > WM_T_PCH)) {
   4286 		/*
   4287 		 * HW configures the SMBus address and LEDs when the OEM and
   4288 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4289 		 * are cleared, SW will configure them instead.
   4290 		 */
   4291 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4292 			device_xname(sc->sc_dev), __func__));
   4293 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4294 			goto release;
   4295 
   4296 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4297 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4298 		    (uint16_t)reg);
   4299 		if (rv != 0)
   4300 			goto release;
   4301 	}
   4302 
   4303 	/* Configure LCD from extended configuration region. */
   4304 	for (i = 0; i < cnf_size; i++) {
   4305 		uint16_t reg_data, reg_addr;
   4306 
   4307 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4308 			goto release;
   4309 
   4310 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4311 			goto release;
   4312 
   4313 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4314 			phy_page = reg_data;
   4315 
   4316 		reg_addr &= IGPHY_MAXREGADDR;
   4317 		reg_addr |= phy_page;
   4318 
   4319 		KASSERT(sc->phy.writereg_locked != NULL);
   4320 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4321 		    reg_data);
   4322 	}
   4323 
   4324 release:
   4325 	sc->phy.release(sc);
   4326 	return rv;
   4327 }
   4328 
   4329 /*
   4330  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4331  *  @sc:       pointer to the HW structure
   4332  *  @d0_state: boolean if entering d0 or d3 device state
   4333  *
   4334  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4335  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4336  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4337  */
   4338 int
   4339 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4340 {
   4341 	uint32_t mac_reg;
   4342 	uint16_t oem_reg;
   4343 	int rv;
   4344 
   4345 	if (sc->sc_type < WM_T_PCH)
   4346 		return 0;
   4347 
   4348 	rv = sc->phy.acquire(sc);
   4349 	if (rv != 0)
   4350 		return rv;
   4351 
   4352 	if (sc->sc_type == WM_T_PCH) {
   4353 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4354 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4355 			goto release;
   4356 	}
   4357 
   4358 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4359 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4360 		goto release;
   4361 
   4362 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4363 
   4364 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4365 	if (rv != 0)
   4366 		goto release;
   4367 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4368 
   4369 	if (d0_state) {
   4370 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4371 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4372 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4373 			oem_reg |= HV_OEM_BITS_LPLU;
   4374 	} else {
   4375 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4376 		    != 0)
   4377 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4378 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4379 		    != 0)
   4380 			oem_reg |= HV_OEM_BITS_LPLU;
   4381 	}
   4382 
   4383 	/* Set Restart auto-neg to activate the bits */
   4384 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4385 	    && (wm_phy_resetisblocked(sc) == false))
   4386 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4387 
   4388 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4389 
   4390 release:
   4391 	sc->phy.release(sc);
   4392 
   4393 	return rv;
   4394 }
   4395 
   4396 /* Init hardware bits */
   4397 void
   4398 wm_initialize_hardware_bits(struct wm_softc *sc)
   4399 {
   4400 	uint32_t tarc0, tarc1, reg;
   4401 
   4402 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4403 		device_xname(sc->sc_dev), __func__));
   4404 
   4405 	/* For 82571 variant, 80003 and ICHs */
   4406 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4407 	    || (sc->sc_type >= WM_T_80003)) {
   4408 
   4409 		/* Transmit Descriptor Control 0 */
   4410 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4411 		reg |= TXDCTL_COUNT_DESC;
   4412 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4413 
   4414 		/* Transmit Descriptor Control 1 */
   4415 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4416 		reg |= TXDCTL_COUNT_DESC;
   4417 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4418 
   4419 		/* TARC0 */
   4420 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4421 		switch (sc->sc_type) {
   4422 		case WM_T_82571:
   4423 		case WM_T_82572:
   4424 		case WM_T_82573:
   4425 		case WM_T_82574:
   4426 		case WM_T_82583:
   4427 		case WM_T_80003:
   4428 			/* Clear bits 30..27 */
   4429 			tarc0 &= ~__BITS(30, 27);
   4430 			break;
   4431 		default:
   4432 			break;
   4433 		}
   4434 
   4435 		switch (sc->sc_type) {
   4436 		case WM_T_82571:
   4437 		case WM_T_82572:
   4438 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4439 
   4440 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4441 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4442 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4443 			/* 8257[12] Errata No.7 */
   4444 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   4445 
   4446 			/* TARC1 bit 28 */
   4447 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4448 				tarc1 &= ~__BIT(28);
   4449 			else
   4450 				tarc1 |= __BIT(28);
   4451 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4452 
   4453 			/*
   4454 			 * 8257[12] Errata No.13
   4455 			 * Disable Dyamic Clock Gating.
   4456 			 */
   4457 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4458 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4459 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4460 			break;
   4461 		case WM_T_82573:
   4462 		case WM_T_82574:
   4463 		case WM_T_82583:
   4464 			if ((sc->sc_type == WM_T_82574)
   4465 			    || (sc->sc_type == WM_T_82583))
   4466 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4467 
   4468 			/* Extended Device Control */
   4469 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4470 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4471 			reg |= __BIT(22);	/* Set bit 22 */
   4472 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4473 
   4474 			/* Device Control */
   4475 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4476 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4477 
   4478 			/* PCIe Control Register */
   4479 			/*
   4480 			 * 82573 Errata (unknown).
   4481 			 *
   4482 			 * 82574 Errata 25 and 82583 Errata 12
   4483 			 * "Dropped Rx Packets":
   4484 			 *   NVM Image Version 2.1.4 and newer has no this bug.
   4485 			 */
   4486 			reg = CSR_READ(sc, WMREG_GCR);
   4487 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4488 			CSR_WRITE(sc, WMREG_GCR, reg);
   4489 
   4490 			if ((sc->sc_type == WM_T_82574)
   4491 			    || (sc->sc_type == WM_T_82583)) {
   4492 				/*
   4493 				 * Document says this bit must be set for
   4494 				 * proper operation.
   4495 				 */
   4496 				reg = CSR_READ(sc, WMREG_GCR);
   4497 				reg |= __BIT(22);
   4498 				CSR_WRITE(sc, WMREG_GCR, reg);
   4499 
   4500 				/*
   4501 				 * Apply workaround for hardware errata
   4502 				 * documented in errata docs Fixes issue where
   4503 				 * some error prone or unreliable PCIe
   4504 				 * completions are occurring, particularly
   4505 				 * with ASPM enabled. Without fix, issue can
   4506 				 * cause Tx timeouts.
   4507 				 */
   4508 				reg = CSR_READ(sc, WMREG_GCR2);
   4509 				reg |= __BIT(0);
   4510 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4511 			}
   4512 			break;
   4513 		case WM_T_80003:
   4514 			/* TARC0 */
   4515 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4516 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4517 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   4518 
   4519 			/* TARC1 bit 28 */
   4520 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4521 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4522 				tarc1 &= ~__BIT(28);
   4523 			else
   4524 				tarc1 |= __BIT(28);
   4525 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4526 			break;
   4527 		case WM_T_ICH8:
   4528 		case WM_T_ICH9:
   4529 		case WM_T_ICH10:
   4530 		case WM_T_PCH:
   4531 		case WM_T_PCH2:
   4532 		case WM_T_PCH_LPT:
   4533 		case WM_T_PCH_SPT:
   4534 		case WM_T_PCH_CNP:
   4535 			/* TARC0 */
   4536 			if (sc->sc_type == WM_T_ICH8) {
   4537 				/* Set TARC0 bits 29 and 28 */
   4538 				tarc0 |= __BITS(29, 28);
   4539 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4540 				tarc0 |= __BIT(29);
   4541 				/*
   4542 				 *  Drop bit 28. From Linux.
   4543 				 * See I218/I219 spec update
   4544 				 * "5. Buffer Overrun While the I219 is
   4545 				 * Processing DMA Transactions"
   4546 				 */
   4547 				tarc0 &= ~__BIT(28);
   4548 			}
   4549 			/* Set TARC0 bits 23,24,26,27 */
   4550 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4551 
   4552 			/* CTRL_EXT */
   4553 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4554 			reg |= __BIT(22);	/* Set bit 22 */
   4555 			/*
   4556 			 * Enable PHY low-power state when MAC is at D3
   4557 			 * w/o WoL
   4558 			 */
   4559 			if (sc->sc_type >= WM_T_PCH)
   4560 				reg |= CTRL_EXT_PHYPDEN;
   4561 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4562 
   4563 			/* TARC1 */
   4564 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4565 			/* bit 28 */
   4566 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4567 				tarc1 &= ~__BIT(28);
   4568 			else
   4569 				tarc1 |= __BIT(28);
   4570 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4571 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4572 
   4573 			/* Device Status */
   4574 			if (sc->sc_type == WM_T_ICH8) {
   4575 				reg = CSR_READ(sc, WMREG_STATUS);
   4576 				reg &= ~__BIT(31);
   4577 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4578 
   4579 			}
   4580 
   4581 			/* IOSFPC */
   4582 			if (sc->sc_type == WM_T_PCH_SPT) {
   4583 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4584 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4585 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4586 			}
   4587 			/*
   4588 			 * Work-around descriptor data corruption issue during
   4589 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4590 			 * capability.
   4591 			 */
   4592 			reg = CSR_READ(sc, WMREG_RFCTL);
   4593 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4594 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4595 			break;
   4596 		default:
   4597 			break;
   4598 		}
   4599 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4600 
   4601 		switch (sc->sc_type) {
   4602 		/*
   4603 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4604 		 * Avoid RSS Hash Value bug.
   4605 		 */
   4606 		case WM_T_82571:
   4607 		case WM_T_82572:
   4608 		case WM_T_82573:
   4609 		case WM_T_80003:
   4610 		case WM_T_ICH8:
   4611 			reg = CSR_READ(sc, WMREG_RFCTL);
   4612 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4613 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4614 			break;
   4615 		case WM_T_82574:
   4616 			/* Use extened Rx descriptor. */
   4617 			reg = CSR_READ(sc, WMREG_RFCTL);
   4618 			reg |= WMREG_RFCTL_EXSTEN;
   4619 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4620 			break;
   4621 		default:
   4622 			break;
   4623 		}
   4624 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4625 		/*
   4626 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4627 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4628 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4629 		 * Correctly by the Device"
   4630 		 *
   4631 		 * I354(C2000) Errata AVR53:
   4632 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4633 		 * Hang"
   4634 		 */
   4635 		reg = CSR_READ(sc, WMREG_RFCTL);
   4636 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4637 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4638 	}
   4639 }
   4640 
   4641 static uint32_t
   4642 wm_rxpbs_adjust_82580(uint32_t val)
   4643 {
   4644 	uint32_t rv = 0;
   4645 
   4646 	if (val < __arraycount(wm_82580_rxpbs_table))
   4647 		rv = wm_82580_rxpbs_table[val];
   4648 
   4649 	return rv;
   4650 }
   4651 
   4652 /*
   4653  * wm_reset_phy:
   4654  *
   4655  *	generic PHY reset function.
   4656  *	Same as e1000_phy_hw_reset_generic()
   4657  */
   4658 static int
   4659 wm_reset_phy(struct wm_softc *sc)
   4660 {
   4661 	uint32_t reg;
   4662 
   4663 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4664 		device_xname(sc->sc_dev), __func__));
   4665 	if (wm_phy_resetisblocked(sc))
   4666 		return -1;
   4667 
   4668 	sc->phy.acquire(sc);
   4669 
   4670 	reg = CSR_READ(sc, WMREG_CTRL);
   4671 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4672 	CSR_WRITE_FLUSH(sc);
   4673 
   4674 	delay(sc->phy.reset_delay_us);
   4675 
   4676 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4677 	CSR_WRITE_FLUSH(sc);
   4678 
   4679 	delay(150);
   4680 
   4681 	sc->phy.release(sc);
   4682 
   4683 	wm_get_cfg_done(sc);
   4684 	wm_phy_post_reset(sc);
   4685 
   4686 	return 0;
   4687 }
   4688 
   4689 /*
   4690  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4691  * so it is enough to check sc->sc_queue[0] only.
   4692  */
   4693 static void
   4694 wm_flush_desc_rings(struct wm_softc *sc)
   4695 {
   4696 	pcireg_t preg;
   4697 	uint32_t reg;
   4698 	struct wm_txqueue *txq;
   4699 	wiseman_txdesc_t *txd;
   4700 	int nexttx;
   4701 	uint32_t rctl;
   4702 
   4703 	/* First, disable MULR fix in FEXTNVM11 */
   4704 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4705 	reg |= FEXTNVM11_DIS_MULRFIX;
   4706 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4707 
   4708 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4709 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4710 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4711 		return;
   4712 
   4713 	/* TX */
   4714 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4715 	    preg, reg);
   4716 	reg = CSR_READ(sc, WMREG_TCTL);
   4717 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4718 
   4719 	txq = &sc->sc_queue[0].wmq_txq;
   4720 	nexttx = txq->txq_next;
   4721 	txd = &txq->txq_descs[nexttx];
   4722 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4723 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4724 	txd->wtx_fields.wtxu_status = 0;
   4725 	txd->wtx_fields.wtxu_options = 0;
   4726 	txd->wtx_fields.wtxu_vlan = 0;
   4727 
   4728 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4729 	    BUS_SPACE_BARRIER_WRITE);
   4730 
   4731 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4732 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4733 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4734 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4735 	delay(250);
   4736 
   4737 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4738 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4739 		return;
   4740 
   4741 	/* RX */
   4742 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4743 	rctl = CSR_READ(sc, WMREG_RCTL);
   4744 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4745 	CSR_WRITE_FLUSH(sc);
   4746 	delay(150);
   4747 
   4748 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4749 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4750 	reg &= 0xffffc000;
   4751 	/*
   4752 	 * Update thresholds: prefetch threshold to 31, host threshold
   4753 	 * to 1 and make sure the granularity is "descriptors" and not
   4754 	 * "cache lines"
   4755 	 */
   4756 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4757 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4758 
   4759 	/* Momentarily enable the RX ring for the changes to take effect */
   4760 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4761 	CSR_WRITE_FLUSH(sc);
   4762 	delay(150);
   4763 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4764 }
   4765 
   4766 /*
   4767  * wm_reset:
   4768  *
   4769  *	Reset the i82542 chip.
   4770  */
   4771 static void
   4772 wm_reset(struct wm_softc *sc)
   4773 {
   4774 	int phy_reset = 0;
   4775 	int i, error = 0;
   4776 	uint32_t reg;
   4777 	uint16_t kmreg;
   4778 	int rv;
   4779 
   4780 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4781 		device_xname(sc->sc_dev), __func__));
   4782 	KASSERT(sc->sc_type != 0);
   4783 
   4784 	/*
   4785 	 * Allocate on-chip memory according to the MTU size.
   4786 	 * The Packet Buffer Allocation register must be written
   4787 	 * before the chip is reset.
   4788 	 */
   4789 	switch (sc->sc_type) {
   4790 	case WM_T_82547:
   4791 	case WM_T_82547_2:
   4792 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4793 		    PBA_22K : PBA_30K;
   4794 		for (i = 0; i < sc->sc_nqueues; i++) {
   4795 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4796 			txq->txq_fifo_head = 0;
   4797 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4798 			txq->txq_fifo_size =
   4799 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4800 			txq->txq_fifo_stall = 0;
   4801 		}
   4802 		break;
   4803 	case WM_T_82571:
   4804 	case WM_T_82572:
   4805 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4806 	case WM_T_80003:
   4807 		sc->sc_pba = PBA_32K;
   4808 		break;
   4809 	case WM_T_82573:
   4810 		sc->sc_pba = PBA_12K;
   4811 		break;
   4812 	case WM_T_82574:
   4813 	case WM_T_82583:
   4814 		sc->sc_pba = PBA_20K;
   4815 		break;
   4816 	case WM_T_82576:
   4817 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4818 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4819 		break;
   4820 	case WM_T_82580:
   4821 	case WM_T_I350:
   4822 	case WM_T_I354:
   4823 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4824 		break;
   4825 	case WM_T_I210:
   4826 	case WM_T_I211:
   4827 		sc->sc_pba = PBA_34K;
   4828 		break;
   4829 	case WM_T_ICH8:
   4830 		/* Workaround for a bit corruption issue in FIFO memory */
   4831 		sc->sc_pba = PBA_8K;
   4832 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4833 		break;
   4834 	case WM_T_ICH9:
   4835 	case WM_T_ICH10:
   4836 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4837 		    PBA_14K : PBA_10K;
   4838 		break;
   4839 	case WM_T_PCH:
   4840 	case WM_T_PCH2:	/* XXX 14K? */
   4841 	case WM_T_PCH_LPT:
   4842 	case WM_T_PCH_SPT:
   4843 	case WM_T_PCH_CNP:
   4844 		sc->sc_pba = PBA_26K;
   4845 		break;
   4846 	default:
   4847 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4848 		    PBA_40K : PBA_48K;
   4849 		break;
   4850 	}
   4851 	/*
   4852 	 * Only old or non-multiqueue devices have the PBA register
   4853 	 * XXX Need special handling for 82575.
   4854 	 */
   4855 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4856 	    || (sc->sc_type == WM_T_82575))
   4857 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4858 
   4859 	/* Prevent the PCI-E bus from sticking */
   4860 	if (sc->sc_flags & WM_F_PCIE) {
   4861 		int timeout = 800;
   4862 
   4863 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4864 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4865 
   4866 		while (timeout--) {
   4867 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4868 			    == 0)
   4869 				break;
   4870 			delay(100);
   4871 		}
   4872 		if (timeout == 0)
   4873 			device_printf(sc->sc_dev,
   4874 			    "failed to disable busmastering\n");
   4875 	}
   4876 
   4877 	/* Set the completion timeout for interface */
   4878 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4879 	    || (sc->sc_type == WM_T_82580)
   4880 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4881 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4882 		wm_set_pcie_completion_timeout(sc);
   4883 
   4884 	/* Clear interrupt */
   4885 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4886 	if (wm_is_using_msix(sc)) {
   4887 		if (sc->sc_type != WM_T_82574) {
   4888 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4889 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4890 		} else
   4891 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4892 	}
   4893 
   4894 	/* Stop the transmit and receive processes. */
   4895 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4896 	sc->sc_rctl &= ~RCTL_EN;
   4897 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4898 	CSR_WRITE_FLUSH(sc);
   4899 
   4900 	/* XXX set_tbi_sbp_82543() */
   4901 
   4902 	delay(10*1000);
   4903 
   4904 	/* Must acquire the MDIO ownership before MAC reset */
   4905 	switch (sc->sc_type) {
   4906 	case WM_T_82573:
   4907 	case WM_T_82574:
   4908 	case WM_T_82583:
   4909 		error = wm_get_hw_semaphore_82573(sc);
   4910 		break;
   4911 	default:
   4912 		break;
   4913 	}
   4914 
   4915 	/*
   4916 	 * 82541 Errata 29? & 82547 Errata 28?
   4917 	 * See also the description about PHY_RST bit in CTRL register
   4918 	 * in 8254x_GBe_SDM.pdf.
   4919 	 */
   4920 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4921 		CSR_WRITE(sc, WMREG_CTRL,
   4922 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4923 		CSR_WRITE_FLUSH(sc);
   4924 		delay(5000);
   4925 	}
   4926 
   4927 	switch (sc->sc_type) {
   4928 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4929 	case WM_T_82541:
   4930 	case WM_T_82541_2:
   4931 	case WM_T_82547:
   4932 	case WM_T_82547_2:
   4933 		/*
   4934 		 * On some chipsets, a reset through a memory-mapped write
   4935 		 * cycle can cause the chip to reset before completing the
   4936 		 * write cycle. This causes major headache that can be avoided
   4937 		 * by issuing the reset via indirect register writes through
   4938 		 * I/O space.
   4939 		 *
   4940 		 * So, if we successfully mapped the I/O BAR at attach time,
   4941 		 * use that. Otherwise, try our luck with a memory-mapped
   4942 		 * reset.
   4943 		 */
   4944 		if (sc->sc_flags & WM_F_IOH_VALID)
   4945 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4946 		else
   4947 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4948 		break;
   4949 	case WM_T_82545_3:
   4950 	case WM_T_82546_3:
   4951 		/* Use the shadow control register on these chips. */
   4952 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4953 		break;
   4954 	case WM_T_80003:
   4955 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4956 		sc->phy.acquire(sc);
   4957 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4958 		sc->phy.release(sc);
   4959 		break;
   4960 	case WM_T_ICH8:
   4961 	case WM_T_ICH9:
   4962 	case WM_T_ICH10:
   4963 	case WM_T_PCH:
   4964 	case WM_T_PCH2:
   4965 	case WM_T_PCH_LPT:
   4966 	case WM_T_PCH_SPT:
   4967 	case WM_T_PCH_CNP:
   4968 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4969 		if (wm_phy_resetisblocked(sc) == false) {
   4970 			/*
   4971 			 * Gate automatic PHY configuration by hardware on
   4972 			 * non-managed 82579
   4973 			 */
   4974 			if ((sc->sc_type == WM_T_PCH2)
   4975 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4976 				== 0))
   4977 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4978 
   4979 			reg |= CTRL_PHY_RESET;
   4980 			phy_reset = 1;
   4981 		} else
   4982 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4983 		sc->phy.acquire(sc);
   4984 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4985 		/* Don't insert a completion barrier when reset */
   4986 		delay(20*1000);
   4987 		mutex_exit(sc->sc_ich_phymtx);
   4988 		break;
   4989 	case WM_T_82580:
   4990 	case WM_T_I350:
   4991 	case WM_T_I354:
   4992 	case WM_T_I210:
   4993 	case WM_T_I211:
   4994 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4995 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4996 			CSR_WRITE_FLUSH(sc);
   4997 		delay(5000);
   4998 		break;
   4999 	case WM_T_82542_2_0:
   5000 	case WM_T_82542_2_1:
   5001 	case WM_T_82543:
   5002 	case WM_T_82540:
   5003 	case WM_T_82545:
   5004 	case WM_T_82546:
   5005 	case WM_T_82571:
   5006 	case WM_T_82572:
   5007 	case WM_T_82573:
   5008 	case WM_T_82574:
   5009 	case WM_T_82575:
   5010 	case WM_T_82576:
   5011 	case WM_T_82583:
   5012 	default:
   5013 		/* Everything else can safely use the documented method. */
   5014 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5015 		break;
   5016 	}
   5017 
   5018 	/* Must release the MDIO ownership after MAC reset */
   5019 	switch (sc->sc_type) {
   5020 	case WM_T_82573:
   5021 	case WM_T_82574:
   5022 	case WM_T_82583:
   5023 		if (error == 0)
   5024 			wm_put_hw_semaphore_82573(sc);
   5025 		break;
   5026 	default:
   5027 		break;
   5028 	}
   5029 
   5030 	/* Set Phy Config Counter to 50msec */
   5031 	if (sc->sc_type == WM_T_PCH2) {
   5032 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5033 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5034 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5035 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5036 	}
   5037 
   5038 	if (phy_reset != 0)
   5039 		wm_get_cfg_done(sc);
   5040 
   5041 	/* Reload EEPROM */
   5042 	switch (sc->sc_type) {
   5043 	case WM_T_82542_2_0:
   5044 	case WM_T_82542_2_1:
   5045 	case WM_T_82543:
   5046 	case WM_T_82544:
   5047 		delay(10);
   5048 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5049 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5050 		CSR_WRITE_FLUSH(sc);
   5051 		delay(2000);
   5052 		break;
   5053 	case WM_T_82540:
   5054 	case WM_T_82545:
   5055 	case WM_T_82545_3:
   5056 	case WM_T_82546:
   5057 	case WM_T_82546_3:
   5058 		delay(5*1000);
   5059 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5060 		break;
   5061 	case WM_T_82541:
   5062 	case WM_T_82541_2:
   5063 	case WM_T_82547:
   5064 	case WM_T_82547_2:
   5065 		delay(20000);
   5066 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5067 		break;
   5068 	case WM_T_82571:
   5069 	case WM_T_82572:
   5070 	case WM_T_82573:
   5071 	case WM_T_82574:
   5072 	case WM_T_82583:
   5073 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5074 			delay(10);
   5075 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5076 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5077 			CSR_WRITE_FLUSH(sc);
   5078 		}
   5079 		/* check EECD_EE_AUTORD */
   5080 		wm_get_auto_rd_done(sc);
   5081 		/*
   5082 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5083 		 * is set.
   5084 		 */
   5085 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5086 		    || (sc->sc_type == WM_T_82583))
   5087 			delay(25*1000);
   5088 		break;
   5089 	case WM_T_82575:
   5090 	case WM_T_82576:
   5091 	case WM_T_82580:
   5092 	case WM_T_I350:
   5093 	case WM_T_I354:
   5094 	case WM_T_I210:
   5095 	case WM_T_I211:
   5096 	case WM_T_80003:
   5097 		/* check EECD_EE_AUTORD */
   5098 		wm_get_auto_rd_done(sc);
   5099 		break;
   5100 	case WM_T_ICH8:
   5101 	case WM_T_ICH9:
   5102 	case WM_T_ICH10:
   5103 	case WM_T_PCH:
   5104 	case WM_T_PCH2:
   5105 	case WM_T_PCH_LPT:
   5106 	case WM_T_PCH_SPT:
   5107 	case WM_T_PCH_CNP:
   5108 		break;
   5109 	default:
   5110 		panic("%s: unknown type\n", __func__);
   5111 	}
   5112 
   5113 	/* Check whether EEPROM is present or not */
   5114 	switch (sc->sc_type) {
   5115 	case WM_T_82575:
   5116 	case WM_T_82576:
   5117 	case WM_T_82580:
   5118 	case WM_T_I350:
   5119 	case WM_T_I354:
   5120 	case WM_T_ICH8:
   5121 	case WM_T_ICH9:
   5122 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5123 			/* Not found */
   5124 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5125 			if (sc->sc_type == WM_T_82575)
   5126 				wm_reset_init_script_82575(sc);
   5127 		}
   5128 		break;
   5129 	default:
   5130 		break;
   5131 	}
   5132 
   5133 	if (phy_reset != 0)
   5134 		wm_phy_post_reset(sc);
   5135 
   5136 	if ((sc->sc_type == WM_T_82580)
   5137 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5138 		/* Clear global device reset status bit */
   5139 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5140 	}
   5141 
   5142 	/* Clear any pending interrupt events. */
   5143 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5144 	reg = CSR_READ(sc, WMREG_ICR);
   5145 	if (wm_is_using_msix(sc)) {
   5146 		if (sc->sc_type != WM_T_82574) {
   5147 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5148 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5149 		} else
   5150 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5151 	}
   5152 
   5153 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5154 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5155 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5156 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5157 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5158 		reg |= KABGTXD_BGSQLBIAS;
   5159 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5160 	}
   5161 
   5162 	/* Reload sc_ctrl */
   5163 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5164 
   5165 	wm_set_eee(sc);
   5166 
   5167 	/*
   5168 	 * For PCH, this write will make sure that any noise will be detected
   5169 	 * as a CRC error and be dropped rather than show up as a bad packet
   5170 	 * to the DMA engine
   5171 	 */
   5172 	if (sc->sc_type == WM_T_PCH)
   5173 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5174 
   5175 	if (sc->sc_type >= WM_T_82544)
   5176 		CSR_WRITE(sc, WMREG_WUC, 0);
   5177 
   5178 	if (sc->sc_type < WM_T_82575)
   5179 		wm_disable_aspm(sc); /* Workaround for some chips */
   5180 
   5181 	wm_reset_mdicnfg_82580(sc);
   5182 
   5183 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5184 		wm_pll_workaround_i210(sc);
   5185 
   5186 	if (sc->sc_type == WM_T_80003) {
   5187 		/* Default to TRUE to enable the MDIC W/A */
   5188 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5189 
   5190 		rv = wm_kmrn_readreg(sc,
   5191 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5192 		if (rv == 0) {
   5193 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5194 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5195 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5196 			else
   5197 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5198 		}
   5199 	}
   5200 }
   5201 
   5202 /*
   5203  * wm_add_rxbuf:
   5204  *
   5205  *	Add a receive buffer to the indiciated descriptor.
   5206  */
   5207 static int
   5208 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5209 {
   5210 	struct wm_softc *sc = rxq->rxq_sc;
   5211 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5212 	struct mbuf *m;
   5213 	int error;
   5214 
   5215 	KASSERT(mutex_owned(rxq->rxq_lock));
   5216 
   5217 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5218 	if (m == NULL)
   5219 		return ENOBUFS;
   5220 
   5221 	MCLGET(m, M_DONTWAIT);
   5222 	if ((m->m_flags & M_EXT) == 0) {
   5223 		m_freem(m);
   5224 		return ENOBUFS;
   5225 	}
   5226 
   5227 	if (rxs->rxs_mbuf != NULL)
   5228 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5229 
   5230 	rxs->rxs_mbuf = m;
   5231 
   5232 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5233 	/*
   5234 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5235 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5236 	 */
   5237 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5238 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5239 	if (error) {
   5240 		/* XXX XXX XXX */
   5241 		aprint_error_dev(sc->sc_dev,
   5242 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5243 		panic("wm_add_rxbuf");
   5244 	}
   5245 
   5246 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5247 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5248 
   5249 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5250 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5251 			wm_init_rxdesc(rxq, idx);
   5252 	} else
   5253 		wm_init_rxdesc(rxq, idx);
   5254 
   5255 	return 0;
   5256 }
   5257 
   5258 /*
   5259  * wm_rxdrain:
   5260  *
   5261  *	Drain the receive queue.
   5262  */
   5263 static void
   5264 wm_rxdrain(struct wm_rxqueue *rxq)
   5265 {
   5266 	struct wm_softc *sc = rxq->rxq_sc;
   5267 	struct wm_rxsoft *rxs;
   5268 	int i;
   5269 
   5270 	KASSERT(mutex_owned(rxq->rxq_lock));
   5271 
   5272 	for (i = 0; i < WM_NRXDESC; i++) {
   5273 		rxs = &rxq->rxq_soft[i];
   5274 		if (rxs->rxs_mbuf != NULL) {
   5275 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5276 			m_freem(rxs->rxs_mbuf);
   5277 			rxs->rxs_mbuf = NULL;
   5278 		}
   5279 	}
   5280 }
   5281 
   5282 /*
   5283  * Setup registers for RSS.
   5284  *
   5285  * XXX not yet VMDq support
   5286  */
   5287 static void
   5288 wm_init_rss(struct wm_softc *sc)
   5289 {
   5290 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5291 	int i;
   5292 
   5293 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5294 
   5295 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5296 		unsigned int qid, reta_ent;
   5297 
   5298 		qid  = i % sc->sc_nqueues;
   5299 		switch (sc->sc_type) {
   5300 		case WM_T_82574:
   5301 			reta_ent = __SHIFTIN(qid,
   5302 			    RETA_ENT_QINDEX_MASK_82574);
   5303 			break;
   5304 		case WM_T_82575:
   5305 			reta_ent = __SHIFTIN(qid,
   5306 			    RETA_ENT_QINDEX1_MASK_82575);
   5307 			break;
   5308 		default:
   5309 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5310 			break;
   5311 		}
   5312 
   5313 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5314 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5315 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5316 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5317 	}
   5318 
   5319 	rss_getkey((uint8_t *)rss_key);
   5320 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5321 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5322 
   5323 	if (sc->sc_type == WM_T_82574)
   5324 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5325 	else
   5326 		mrqc = MRQC_ENABLE_RSS_MQ;
   5327 
   5328 	/*
   5329 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5330 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5331 	 */
   5332 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5333 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5334 #if 0
   5335 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5336 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5337 #endif
   5338 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5339 
   5340 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5341 }
   5342 
   5343 /*
   5344  * Adjust TX and RX queue numbers which the system actulally uses.
   5345  *
   5346  * The numbers are affected by below parameters.
   5347  *     - The nubmer of hardware queues
   5348  *     - The number of MSI-X vectors (= "nvectors" argument)
   5349  *     - ncpu
   5350  */
   5351 static void
   5352 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5353 {
   5354 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5355 
   5356 	if (nvectors < 2) {
   5357 		sc->sc_nqueues = 1;
   5358 		return;
   5359 	}
   5360 
   5361 	switch (sc->sc_type) {
   5362 	case WM_T_82572:
   5363 		hw_ntxqueues = 2;
   5364 		hw_nrxqueues = 2;
   5365 		break;
   5366 	case WM_T_82574:
   5367 		hw_ntxqueues = 2;
   5368 		hw_nrxqueues = 2;
   5369 		break;
   5370 	case WM_T_82575:
   5371 		hw_ntxqueues = 4;
   5372 		hw_nrxqueues = 4;
   5373 		break;
   5374 	case WM_T_82576:
   5375 		hw_ntxqueues = 16;
   5376 		hw_nrxqueues = 16;
   5377 		break;
   5378 	case WM_T_82580:
   5379 	case WM_T_I350:
   5380 	case WM_T_I354:
   5381 		hw_ntxqueues = 8;
   5382 		hw_nrxqueues = 8;
   5383 		break;
   5384 	case WM_T_I210:
   5385 		hw_ntxqueues = 4;
   5386 		hw_nrxqueues = 4;
   5387 		break;
   5388 	case WM_T_I211:
   5389 		hw_ntxqueues = 2;
   5390 		hw_nrxqueues = 2;
   5391 		break;
   5392 		/*
   5393 		 * As below ethernet controllers does not support MSI-X,
   5394 		 * this driver let them not use multiqueue.
   5395 		 *     - WM_T_80003
   5396 		 *     - WM_T_ICH8
   5397 		 *     - WM_T_ICH9
   5398 		 *     - WM_T_ICH10
   5399 		 *     - WM_T_PCH
   5400 		 *     - WM_T_PCH2
   5401 		 *     - WM_T_PCH_LPT
   5402 		 */
   5403 	default:
   5404 		hw_ntxqueues = 1;
   5405 		hw_nrxqueues = 1;
   5406 		break;
   5407 	}
   5408 
   5409 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5410 
   5411 	/*
   5412 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
   5413 	 * the number of queues used actually.
   5414 	 */
   5415 	if (nvectors < hw_nqueues + 1)
   5416 		sc->sc_nqueues = nvectors - 1;
   5417 	else
   5418 		sc->sc_nqueues = hw_nqueues;
   5419 
   5420 	/*
   5421 	 * As queues more then cpus cannot improve scaling, we limit
   5422 	 * the number of queues used actually.
   5423 	 */
   5424 	if (ncpu < sc->sc_nqueues)
   5425 		sc->sc_nqueues = ncpu;
   5426 }
   5427 
   5428 static inline bool
   5429 wm_is_using_msix(struct wm_softc *sc)
   5430 {
   5431 
   5432 	return (sc->sc_nintrs > 1);
   5433 }
   5434 
   5435 static inline bool
   5436 wm_is_using_multiqueue(struct wm_softc *sc)
   5437 {
   5438 
   5439 	return (sc->sc_nqueues > 1);
   5440 }
   5441 
   5442 static int
   5443 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5444 {
   5445 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5446 
   5447 	wmq->wmq_id = qidx;
   5448 	wmq->wmq_intr_idx = intr_idx;
   5449 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5450 #ifdef WM_MPSAFE
   5451 	    | SOFTINT_MPSAFE
   5452 #endif
   5453 	    , wm_handle_queue, wmq);
   5454 	if (wmq->wmq_si != NULL)
   5455 		return 0;
   5456 
   5457 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5458 	    wmq->wmq_id);
   5459 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5460 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5461 	return ENOMEM;
   5462 }
   5463 
   5464 /*
   5465  * Both single interrupt MSI and INTx can use this function.
   5466  */
   5467 static int
   5468 wm_setup_legacy(struct wm_softc *sc)
   5469 {
   5470 	pci_chipset_tag_t pc = sc->sc_pc;
   5471 	const char *intrstr = NULL;
   5472 	char intrbuf[PCI_INTRSTR_LEN];
   5473 	int error;
   5474 
   5475 	error = wm_alloc_txrx_queues(sc);
   5476 	if (error) {
   5477 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5478 		    error);
   5479 		return ENOMEM;
   5480 	}
   5481 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5482 	    sizeof(intrbuf));
   5483 #ifdef WM_MPSAFE
   5484 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5485 #endif
   5486 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5487 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5488 	if (sc->sc_ihs[0] == NULL) {
   5489 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5490 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5491 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5492 		return ENOMEM;
   5493 	}
   5494 
   5495 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5496 	sc->sc_nintrs = 1;
   5497 
   5498 	return wm_softint_establish(sc, 0, 0);
   5499 }
   5500 
   5501 static int
   5502 wm_setup_msix(struct wm_softc *sc)
   5503 {
   5504 	void *vih;
   5505 	kcpuset_t *affinity;
   5506 	int qidx, error, intr_idx, txrx_established;
   5507 	pci_chipset_tag_t pc = sc->sc_pc;
   5508 	const char *intrstr = NULL;
   5509 	char intrbuf[PCI_INTRSTR_LEN];
   5510 	char intr_xname[INTRDEVNAMEBUF];
   5511 
   5512 	if (sc->sc_nqueues < ncpu) {
   5513 		/*
   5514 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   5515 		 * interrupts start from CPU#1.
   5516 		 */
   5517 		sc->sc_affinity_offset = 1;
   5518 	} else {
   5519 		/*
   5520 		 * In this case, this device use all CPUs. So, we unify
   5521 		 * affinitied cpu_index to msix vector number for readability.
   5522 		 */
   5523 		sc->sc_affinity_offset = 0;
   5524 	}
   5525 
   5526 	error = wm_alloc_txrx_queues(sc);
   5527 	if (error) {
   5528 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5529 		    error);
   5530 		return ENOMEM;
   5531 	}
   5532 
   5533 	kcpuset_create(&affinity, false);
   5534 	intr_idx = 0;
   5535 
   5536 	/*
   5537 	 * TX and RX
   5538 	 */
   5539 	txrx_established = 0;
   5540 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5541 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5542 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5543 
   5544 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5545 		    sizeof(intrbuf));
   5546 #ifdef WM_MPSAFE
   5547 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5548 		    PCI_INTR_MPSAFE, true);
   5549 #endif
   5550 		memset(intr_xname, 0, sizeof(intr_xname));
   5551 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5552 		    device_xname(sc->sc_dev), qidx);
   5553 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5554 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5555 		if (vih == NULL) {
   5556 			aprint_error_dev(sc->sc_dev,
   5557 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5558 			    intrstr ? " at " : "",
   5559 			    intrstr ? intrstr : "");
   5560 
   5561 			goto fail;
   5562 		}
   5563 		kcpuset_zero(affinity);
   5564 		/* Round-robin affinity */
   5565 		kcpuset_set(affinity, affinity_to);
   5566 		error = interrupt_distribute(vih, affinity, NULL);
   5567 		if (error == 0) {
   5568 			aprint_normal_dev(sc->sc_dev,
   5569 			    "for TX and RX interrupting at %s affinity to %u\n",
   5570 			    intrstr, affinity_to);
   5571 		} else {
   5572 			aprint_normal_dev(sc->sc_dev,
   5573 			    "for TX and RX interrupting at %s\n", intrstr);
   5574 		}
   5575 		sc->sc_ihs[intr_idx] = vih;
   5576 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5577 			goto fail;
   5578 		txrx_established++;
   5579 		intr_idx++;
   5580 	}
   5581 
   5582 	/* LINK */
   5583 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5584 	    sizeof(intrbuf));
   5585 #ifdef WM_MPSAFE
   5586 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5587 #endif
   5588 	memset(intr_xname, 0, sizeof(intr_xname));
   5589 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5590 	    device_xname(sc->sc_dev));
   5591 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5592 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5593 	if (vih == NULL) {
   5594 		aprint_error_dev(sc->sc_dev,
   5595 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5596 		    intrstr ? " at " : "",
   5597 		    intrstr ? intrstr : "");
   5598 
   5599 		goto fail;
   5600 	}
   5601 	/* Keep default affinity to LINK interrupt */
   5602 	aprint_normal_dev(sc->sc_dev,
   5603 	    "for LINK interrupting at %s\n", intrstr);
   5604 	sc->sc_ihs[intr_idx] = vih;
   5605 	sc->sc_link_intr_idx = intr_idx;
   5606 
   5607 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5608 	kcpuset_destroy(affinity);
   5609 	return 0;
   5610 
   5611  fail:
   5612 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5613 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5614 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5615 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5616 	}
   5617 
   5618 	kcpuset_destroy(affinity);
   5619 	return ENOMEM;
   5620 }
   5621 
   5622 static void
   5623 wm_unset_stopping_flags(struct wm_softc *sc)
   5624 {
   5625 	int i;
   5626 
   5627 	KASSERT(WM_CORE_LOCKED(sc));
   5628 
   5629 	/* Must unset stopping flags in ascending order. */
   5630 	for (i = 0; i < sc->sc_nqueues; i++) {
   5631 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5632 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5633 
   5634 		mutex_enter(txq->txq_lock);
   5635 		txq->txq_stopping = false;
   5636 		mutex_exit(txq->txq_lock);
   5637 
   5638 		mutex_enter(rxq->rxq_lock);
   5639 		rxq->rxq_stopping = false;
   5640 		mutex_exit(rxq->rxq_lock);
   5641 	}
   5642 
   5643 	sc->sc_core_stopping = false;
   5644 }
   5645 
   5646 static void
   5647 wm_set_stopping_flags(struct wm_softc *sc)
   5648 {
   5649 	int i;
   5650 
   5651 	KASSERT(WM_CORE_LOCKED(sc));
   5652 
   5653 	sc->sc_core_stopping = true;
   5654 
   5655 	/* Must set stopping flags in ascending order. */
   5656 	for (i = 0; i < sc->sc_nqueues; i++) {
   5657 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5658 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5659 
   5660 		mutex_enter(rxq->rxq_lock);
   5661 		rxq->rxq_stopping = true;
   5662 		mutex_exit(rxq->rxq_lock);
   5663 
   5664 		mutex_enter(txq->txq_lock);
   5665 		txq->txq_stopping = true;
   5666 		mutex_exit(txq->txq_lock);
   5667 	}
   5668 }
   5669 
   5670 /*
   5671  * Write interrupt interval value to ITR or EITR
   5672  */
   5673 static void
   5674 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5675 {
   5676 
   5677 	if (!wmq->wmq_set_itr)
   5678 		return;
   5679 
   5680 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5681 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5682 
   5683 		/*
   5684 		 * 82575 doesn't have CNT_INGR field.
   5685 		 * So, overwrite counter field by software.
   5686 		 */
   5687 		if (sc->sc_type == WM_T_82575)
   5688 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5689 		else
   5690 			eitr |= EITR_CNT_INGR;
   5691 
   5692 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5693 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5694 		/*
   5695 		 * 82574 has both ITR and EITR. SET EITR when we use
   5696 		 * the multi queue function with MSI-X.
   5697 		 */
   5698 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5699 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5700 	} else {
   5701 		KASSERT(wmq->wmq_id == 0);
   5702 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5703 	}
   5704 
   5705 	wmq->wmq_set_itr = false;
   5706 }
   5707 
   5708 /*
   5709  * TODO
   5710  * Below dynamic calculation of itr is almost the same as linux igb,
   5711  * however it does not fit to wm(4). So, we will have been disable AIM
   5712  * until we will find appropriate calculation of itr.
   5713  */
   5714 /*
   5715  * calculate interrupt interval value to be going to write register in
   5716  * wm_itrs_writereg(). This function does not write ITR/EITR register.
   5717  */
   5718 static void
   5719 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5720 {
   5721 #ifdef NOTYET
   5722 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5723 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5724 	uint32_t avg_size = 0;
   5725 	uint32_t new_itr;
   5726 
   5727 	if (rxq->rxq_packets)
   5728 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5729 	if (txq->txq_packets)
   5730 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5731 
   5732 	if (avg_size == 0) {
   5733 		new_itr = 450; /* restore default value */
   5734 		goto out;
   5735 	}
   5736 
   5737 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5738 	avg_size += 24;
   5739 
   5740 	/* Don't starve jumbo frames */
   5741 	avg_size = uimin(avg_size, 3000);
   5742 
   5743 	/* Give a little boost to mid-size frames */
   5744 	if ((avg_size > 300) && (avg_size < 1200))
   5745 		new_itr = avg_size / 3;
   5746 	else
   5747 		new_itr = avg_size / 2;
   5748 
   5749 out:
   5750 	/*
   5751 	 * The usage of 82574 and 82575 EITR is different from otther NEWQUEUE
   5752 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5753 	 */
   5754 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5755 		new_itr *= 4;
   5756 
   5757 	if (new_itr != wmq->wmq_itr) {
   5758 		wmq->wmq_itr = new_itr;
   5759 		wmq->wmq_set_itr = true;
   5760 	} else
   5761 		wmq->wmq_set_itr = false;
   5762 
   5763 	rxq->rxq_packets = 0;
   5764 	rxq->rxq_bytes = 0;
   5765 	txq->txq_packets = 0;
   5766 	txq->txq_bytes = 0;
   5767 #endif
   5768 }
   5769 
   5770 static void
   5771 wm_init_sysctls(struct wm_softc *sc)
   5772 {
   5773 	struct sysctllog **log;
   5774 	const struct sysctlnode *rnode, *cnode;
   5775 	int rv;
   5776 	const char *dvname;
   5777 
   5778 	log = &sc->sc_sysctllog;
   5779 	dvname = device_xname(sc->sc_dev);
   5780 
   5781 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5782 	    0, CTLTYPE_NODE, dvname,
   5783 	    SYSCTL_DESCR("wm information and settings"),
   5784 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5785 	if (rv != 0)
   5786 		goto err;
   5787 
   5788 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5789 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5790 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5791 	if (rv != 0)
   5792 		goto teardown;
   5793 
   5794 	return;
   5795 
   5796 teardown:
   5797 	sysctl_teardown(log);
   5798 err:
   5799 	sc->sc_sysctllog = NULL;
   5800 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5801 	    __func__, rv);
   5802 }
   5803 
   5804 /*
   5805  * wm_init:		[ifnet interface function]
   5806  *
   5807  *	Initialize the interface.
   5808  */
   5809 static int
   5810 wm_init(struct ifnet *ifp)
   5811 {
   5812 	struct wm_softc *sc = ifp->if_softc;
   5813 	int ret;
   5814 
   5815 	WM_CORE_LOCK(sc);
   5816 	ret = wm_init_locked(ifp);
   5817 	WM_CORE_UNLOCK(sc);
   5818 
   5819 	return ret;
   5820 }
   5821 
   5822 static int
   5823 wm_init_locked(struct ifnet *ifp)
   5824 {
   5825 	struct wm_softc *sc = ifp->if_softc;
   5826 	struct ethercom *ec = &sc->sc_ethercom;
   5827 	int i, j, trynum, error = 0;
   5828 	uint32_t reg, sfp_mask = 0;
   5829 
   5830 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5831 		device_xname(sc->sc_dev), __func__));
   5832 	KASSERT(WM_CORE_LOCKED(sc));
   5833 
   5834 	/*
   5835 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
   5836 	 * There is a small but measurable benefit to avoiding the adjusment
   5837 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5838 	 * on such platforms.  One possibility is that the DMA itself is
   5839 	 * slightly more efficient if the front of the entire packet (instead
   5840 	 * of the front of the headers) is aligned.
   5841 	 *
   5842 	 * Note we must always set align_tweak to 0 if we are using
   5843 	 * jumbo frames.
   5844 	 */
   5845 #ifdef __NO_STRICT_ALIGNMENT
   5846 	sc->sc_align_tweak = 0;
   5847 #else
   5848 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5849 		sc->sc_align_tweak = 0;
   5850 	else
   5851 		sc->sc_align_tweak = 2;
   5852 #endif /* __NO_STRICT_ALIGNMENT */
   5853 
   5854 	/* Cancel any pending I/O. */
   5855 	wm_stop_locked(ifp, false, false);
   5856 
   5857 	/* Update statistics before reset */
   5858 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5859 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5860 
   5861 	/* PCH_SPT hardware workaround */
   5862 	if (sc->sc_type == WM_T_PCH_SPT)
   5863 		wm_flush_desc_rings(sc);
   5864 
   5865 	/* Reset the chip to a known state. */
   5866 	wm_reset(sc);
   5867 
   5868 	/*
   5869 	 * AMT based hardware can now take control from firmware
   5870 	 * Do this after reset.
   5871 	 */
   5872 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5873 		wm_get_hw_control(sc);
   5874 
   5875 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5876 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5877 		wm_legacy_irq_quirk_spt(sc);
   5878 
   5879 	/* Init hardware bits */
   5880 	wm_initialize_hardware_bits(sc);
   5881 
   5882 	/* Reset the PHY. */
   5883 	if (sc->sc_flags & WM_F_HAS_MII)
   5884 		wm_gmii_reset(sc);
   5885 
   5886 	if (sc->sc_type >= WM_T_ICH8) {
   5887 		reg = CSR_READ(sc, WMREG_GCR);
   5888 		/*
   5889 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5890 		 * default after reset.
   5891 		 */
   5892 		if (sc->sc_type == WM_T_ICH8)
   5893 			reg |= GCR_NO_SNOOP_ALL;
   5894 		else
   5895 			reg &= ~GCR_NO_SNOOP_ALL;
   5896 		CSR_WRITE(sc, WMREG_GCR, reg);
   5897 	}
   5898 	if ((sc->sc_type >= WM_T_ICH8)
   5899 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5900 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5901 
   5902 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5903 		reg |= CTRL_EXT_RO_DIS;
   5904 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5905 	}
   5906 
   5907 	/* Calculate (E)ITR value */
   5908 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5909 		/*
   5910 		 * For NEWQUEUE's EITR (except for 82575).
   5911 		 * 82575's EITR should be set same throttling value as other
   5912 		 * old controllers' ITR because the interrupt/sec calculation
   5913 		 * is the same, that is, 1,000,000,000 / (N * 256).
   5914 		 *
   5915 		 * 82574's EITR should be set same throttling value as ITR.
   5916 		 *
   5917 		 * For N interrupts/sec, set this value to:
   5918 		 * 1,000,000 / N in contrast to ITR throttoling value.
   5919 		 */
   5920 		sc->sc_itr_init = 450;
   5921 	} else if (sc->sc_type >= WM_T_82543) {
   5922 		/*
   5923 		 * Set up the interrupt throttling register (units of 256ns)
   5924 		 * Note that a footnote in Intel's documentation says this
   5925 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5926 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5927 		 * that that is also true for the 1024ns units of the other
   5928 		 * interrupt-related timer registers -- so, really, we ought
   5929 		 * to divide this value by 4 when the link speed is low.
   5930 		 *
   5931 		 * XXX implement this division at link speed change!
   5932 		 */
   5933 
   5934 		/*
   5935 		 * For N interrupts/sec, set this value to:
   5936 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5937 		 * absolute and packet timer values to this value
   5938 		 * divided by 4 to get "simple timer" behavior.
   5939 		 */
   5940 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5941 	}
   5942 
   5943 	error = wm_init_txrx_queues(sc);
   5944 	if (error)
   5945 		goto out;
   5946 
   5947 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5948 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5949 	    (sc->sc_type >= WM_T_82575))
   5950 		wm_serdes_power_up_link_82575(sc);
   5951 
   5952 	/* Clear out the VLAN table -- we don't use it (yet). */
   5953 	CSR_WRITE(sc, WMREG_VET, 0);
   5954 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5955 		trynum = 10; /* Due to hw errata */
   5956 	else
   5957 		trynum = 1;
   5958 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5959 		for (j = 0; j < trynum; j++)
   5960 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5961 
   5962 	/*
   5963 	 * Set up flow-control parameters.
   5964 	 *
   5965 	 * XXX Values could probably stand some tuning.
   5966 	 */
   5967 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5968 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5969 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5970 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5971 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5972 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5973 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5974 	}
   5975 
   5976 	sc->sc_fcrtl = FCRTL_DFLT;
   5977 	if (sc->sc_type < WM_T_82543) {
   5978 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5979 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5980 	} else {
   5981 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5982 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5983 	}
   5984 
   5985 	if (sc->sc_type == WM_T_80003)
   5986 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5987 	else
   5988 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5989 
   5990 	/* Writes the control register. */
   5991 	wm_set_vlan(sc);
   5992 
   5993 	if (sc->sc_flags & WM_F_HAS_MII) {
   5994 		uint16_t kmreg;
   5995 
   5996 		switch (sc->sc_type) {
   5997 		case WM_T_80003:
   5998 		case WM_T_ICH8:
   5999 		case WM_T_ICH9:
   6000 		case WM_T_ICH10:
   6001 		case WM_T_PCH:
   6002 		case WM_T_PCH2:
   6003 		case WM_T_PCH_LPT:
   6004 		case WM_T_PCH_SPT:
   6005 		case WM_T_PCH_CNP:
   6006 			/*
   6007 			 * Set the mac to wait the maximum time between each
   6008 			 * iteration and increase the max iterations when
   6009 			 * polling the phy; this fixes erroneous timeouts at
   6010 			 * 10Mbps.
   6011 			 */
   6012 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6013 			    0xFFFF);
   6014 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6015 			    &kmreg);
   6016 			kmreg |= 0x3F;
   6017 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6018 			    kmreg);
   6019 			break;
   6020 		default:
   6021 			break;
   6022 		}
   6023 
   6024 		if (sc->sc_type == WM_T_80003) {
   6025 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6026 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6027 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6028 
   6029 			/* Bypass RX and TX FIFO's */
   6030 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6031 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6032 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6033 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6034 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6035 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6036 		}
   6037 	}
   6038 #if 0
   6039 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6040 #endif
   6041 
   6042 	/* Set up checksum offload parameters. */
   6043 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6044 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6045 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6046 		reg |= RXCSUM_IPOFL;
   6047 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6048 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6049 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6050 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6051 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6052 
   6053 	/* Set registers about MSI-X */
   6054 	if (wm_is_using_msix(sc)) {
   6055 		uint32_t ivar, qintr_idx;
   6056 		struct wm_queue *wmq;
   6057 		unsigned int qid;
   6058 
   6059 		if (sc->sc_type == WM_T_82575) {
   6060 			/* Interrupt control */
   6061 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6062 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6063 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6064 
   6065 			/* TX and RX */
   6066 			for (i = 0; i < sc->sc_nqueues; i++) {
   6067 				wmq = &sc->sc_queue[i];
   6068 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6069 				    EITR_TX_QUEUE(wmq->wmq_id)
   6070 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6071 			}
   6072 			/* Link status */
   6073 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6074 			    EITR_OTHER);
   6075 		} else if (sc->sc_type == WM_T_82574) {
   6076 			/* Interrupt control */
   6077 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6078 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6079 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6080 
   6081 			/*
   6082 			 * Workaround issue with spurious interrupts
   6083 			 * in MSI-X mode.
   6084 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
   6085 			 * initialized yet. So re-initialize WMREG_RFCTL here.
   6086 			 */
   6087 			reg = CSR_READ(sc, WMREG_RFCTL);
   6088 			reg |= WMREG_RFCTL_ACKDIS;
   6089 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6090 
   6091 			ivar = 0;
   6092 			/* TX and RX */
   6093 			for (i = 0; i < sc->sc_nqueues; i++) {
   6094 				wmq = &sc->sc_queue[i];
   6095 				qid = wmq->wmq_id;
   6096 				qintr_idx = wmq->wmq_intr_idx;
   6097 
   6098 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6099 				    IVAR_TX_MASK_Q_82574(qid));
   6100 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6101 				    IVAR_RX_MASK_Q_82574(qid));
   6102 			}
   6103 			/* Link status */
   6104 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6105 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6106 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6107 		} else {
   6108 			/* Interrupt control */
   6109 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6110 			    | GPIE_EIAME | GPIE_PBA);
   6111 
   6112 			switch (sc->sc_type) {
   6113 			case WM_T_82580:
   6114 			case WM_T_I350:
   6115 			case WM_T_I354:
   6116 			case WM_T_I210:
   6117 			case WM_T_I211:
   6118 				/* TX and RX */
   6119 				for (i = 0; i < sc->sc_nqueues; i++) {
   6120 					wmq = &sc->sc_queue[i];
   6121 					qid = wmq->wmq_id;
   6122 					qintr_idx = wmq->wmq_intr_idx;
   6123 
   6124 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6125 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6126 					ivar |= __SHIFTIN((qintr_idx
   6127 						| IVAR_VALID),
   6128 					    IVAR_TX_MASK_Q(qid));
   6129 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6130 					ivar |= __SHIFTIN((qintr_idx
   6131 						| IVAR_VALID),
   6132 					    IVAR_RX_MASK_Q(qid));
   6133 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6134 				}
   6135 				break;
   6136 			case WM_T_82576:
   6137 				/* TX and RX */
   6138 				for (i = 0; i < sc->sc_nqueues; i++) {
   6139 					wmq = &sc->sc_queue[i];
   6140 					qid = wmq->wmq_id;
   6141 					qintr_idx = wmq->wmq_intr_idx;
   6142 
   6143 					ivar = CSR_READ(sc,
   6144 					    WMREG_IVAR_Q_82576(qid));
   6145 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6146 					ivar |= __SHIFTIN((qintr_idx
   6147 						| IVAR_VALID),
   6148 					    IVAR_TX_MASK_Q_82576(qid));
   6149 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6150 					ivar |= __SHIFTIN((qintr_idx
   6151 						| IVAR_VALID),
   6152 					    IVAR_RX_MASK_Q_82576(qid));
   6153 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6154 					    ivar);
   6155 				}
   6156 				break;
   6157 			default:
   6158 				break;
   6159 			}
   6160 
   6161 			/* Link status */
   6162 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6163 			    IVAR_MISC_OTHER);
   6164 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6165 		}
   6166 
   6167 		if (wm_is_using_multiqueue(sc)) {
   6168 			wm_init_rss(sc);
   6169 
   6170 			/*
   6171 			** NOTE: Receive Full-Packet Checksum Offload
   6172 			** is mutually exclusive with Multiqueue. However
   6173 			** this is not the same as TCP/IP checksums which
   6174 			** still work.
   6175 			*/
   6176 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6177 			reg |= RXCSUM_PCSD;
   6178 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6179 		}
   6180 	}
   6181 
   6182 	/* Set up the interrupt registers. */
   6183 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6184 
   6185 	/* Enable SFP module insertion interrupt if it's required */
   6186 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6187 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6188 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6189 		sfp_mask = ICR_GPI(0);
   6190 	}
   6191 
   6192 	if (wm_is_using_msix(sc)) {
   6193 		uint32_t mask;
   6194 		struct wm_queue *wmq;
   6195 
   6196 		switch (sc->sc_type) {
   6197 		case WM_T_82574:
   6198 			mask = 0;
   6199 			for (i = 0; i < sc->sc_nqueues; i++) {
   6200 				wmq = &sc->sc_queue[i];
   6201 				mask |= ICR_TXQ(wmq->wmq_id);
   6202 				mask |= ICR_RXQ(wmq->wmq_id);
   6203 			}
   6204 			mask |= ICR_OTHER;
   6205 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6206 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6207 			break;
   6208 		default:
   6209 			if (sc->sc_type == WM_T_82575) {
   6210 				mask = 0;
   6211 				for (i = 0; i < sc->sc_nqueues; i++) {
   6212 					wmq = &sc->sc_queue[i];
   6213 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6214 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6215 				}
   6216 				mask |= EITR_OTHER;
   6217 			} else {
   6218 				mask = 0;
   6219 				for (i = 0; i < sc->sc_nqueues; i++) {
   6220 					wmq = &sc->sc_queue[i];
   6221 					mask |= 1 << wmq->wmq_intr_idx;
   6222 				}
   6223 				mask |= 1 << sc->sc_link_intr_idx;
   6224 			}
   6225 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6226 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6227 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6228 
   6229 			/* For other interrupts */
   6230 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6231 			break;
   6232 		}
   6233 	} else {
   6234 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6235 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6236 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6237 	}
   6238 
   6239 	/* Set up the inter-packet gap. */
   6240 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6241 
   6242 	if (sc->sc_type >= WM_T_82543) {
   6243 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6244 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6245 			wm_itrs_writereg(sc, wmq);
   6246 		}
   6247 		/*
   6248 		 * Link interrupts occur much less than TX
   6249 		 * interrupts and RX interrupts. So, we don't
   6250 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   6251 		 * FreeBSD's if_igb.
   6252 		 */
   6253 	}
   6254 
   6255 	/* Set the VLAN ethernetype. */
   6256 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6257 
   6258 	/*
   6259 	 * Set up the transmit control register; we start out with
   6260 	 * a collision distance suitable for FDX, but update it whe
   6261 	 * we resolve the media type.
   6262 	 */
   6263 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6264 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6265 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6266 	if (sc->sc_type >= WM_T_82571)
   6267 		sc->sc_tctl |= TCTL_MULR;
   6268 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6269 
   6270 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6271 		/* Write TDT after TCTL.EN is set. See the document. */
   6272 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6273 	}
   6274 
   6275 	if (sc->sc_type == WM_T_80003) {
   6276 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6277 		reg &= ~TCTL_EXT_GCEX_MASK;
   6278 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6279 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6280 	}
   6281 
   6282 	/* Set the media. */
   6283 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6284 		goto out;
   6285 
   6286 	/* Configure for OS presence */
   6287 	wm_init_manageability(sc);
   6288 
   6289 	/*
   6290 	 * Set up the receive control register; we actually program the
   6291 	 * register when we set the receive filter. Use multicast address
   6292 	 * offset type 0.
   6293 	 *
   6294 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6295 	 * don't enable that feature.
   6296 	 */
   6297 	sc->sc_mchash_type = 0;
   6298 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6299 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6300 
   6301 	/* 82574 use one buffer extended Rx descriptor. */
   6302 	if (sc->sc_type == WM_T_82574)
   6303 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6304 
   6305 	/*
   6306 	 * The I350 has a bug where it always strips the CRC whether
   6307 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   6308 	 */
   6309 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6310 	    || (sc->sc_type == WM_T_I210))
   6311 		sc->sc_rctl |= RCTL_SECRC;
   6312 
   6313 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6314 	    && (ifp->if_mtu > ETHERMTU)) {
   6315 		sc->sc_rctl |= RCTL_LPE;
   6316 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6317 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6318 	}
   6319 
   6320 	if (MCLBYTES == 2048)
   6321 		sc->sc_rctl |= RCTL_2k;
   6322 	else {
   6323 		if (sc->sc_type >= WM_T_82543) {
   6324 			switch (MCLBYTES) {
   6325 			case 4096:
   6326 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6327 				break;
   6328 			case 8192:
   6329 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6330 				break;
   6331 			case 16384:
   6332 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6333 				break;
   6334 			default:
   6335 				panic("wm_init: MCLBYTES %d unsupported",
   6336 				    MCLBYTES);
   6337 				break;
   6338 			}
   6339 		} else
   6340 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6341 	}
   6342 
   6343 	/* Enable ECC */
   6344 	switch (sc->sc_type) {
   6345 	case WM_T_82571:
   6346 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6347 		reg |= PBA_ECC_CORR_EN;
   6348 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6349 		break;
   6350 	case WM_T_PCH_LPT:
   6351 	case WM_T_PCH_SPT:
   6352 	case WM_T_PCH_CNP:
   6353 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6354 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6355 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6356 
   6357 		sc->sc_ctrl |= CTRL_MEHE;
   6358 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6359 		break;
   6360 	default:
   6361 		break;
   6362 	}
   6363 
   6364 	/*
   6365 	 * Set the receive filter.
   6366 	 *
   6367 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6368 	 * the setting of RCTL.EN in wm_set_filter()
   6369 	 */
   6370 	wm_set_filter(sc);
   6371 
   6372 	/* On 575 and later set RDT only if RX enabled */
   6373 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6374 		int qidx;
   6375 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6376 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6377 			for (i = 0; i < WM_NRXDESC; i++) {
   6378 				mutex_enter(rxq->rxq_lock);
   6379 				wm_init_rxdesc(rxq, i);
   6380 				mutex_exit(rxq->rxq_lock);
   6381 
   6382 			}
   6383 		}
   6384 	}
   6385 
   6386 	wm_unset_stopping_flags(sc);
   6387 
   6388 	/* Start the one second link check clock. */
   6389 	callout_schedule(&sc->sc_tick_ch, hz);
   6390 
   6391 	/* ...all done! */
   6392 	ifp->if_flags |= IFF_RUNNING;
   6393 
   6394  out:
   6395 	/* Save last flags for the callback */
   6396 	sc->sc_if_flags = ifp->if_flags;
   6397 	sc->sc_ec_capenable = ec->ec_capenable;
   6398 	if (error)
   6399 		log(LOG_ERR, "%s: interface not running\n",
   6400 		    device_xname(sc->sc_dev));
   6401 	return error;
   6402 }
   6403 
   6404 /*
   6405  * wm_stop:		[ifnet interface function]
   6406  *
   6407  *	Stop transmission on the interface.
   6408  */
   6409 static void
   6410 wm_stop(struct ifnet *ifp, int disable)
   6411 {
   6412 	struct wm_softc *sc = ifp->if_softc;
   6413 
   6414 	ASSERT_SLEEPABLE();
   6415 
   6416 	WM_CORE_LOCK(sc);
   6417 	wm_stop_locked(ifp, disable ? true : false, true);
   6418 	WM_CORE_UNLOCK(sc);
   6419 
   6420 	/*
   6421 	 * After wm_set_stopping_flags(), it is guaranteed
   6422 	 * wm_handle_queue_work() does not call workqueue_enqueue().
   6423 	 * However, workqueue_wait() cannot call in wm_stop_locked()
   6424 	 * because it can sleep...
   6425 	 * so, call workqueue_wait() here.
   6426 	 */
   6427 	for (int i = 0; i < sc->sc_nqueues; i++)
   6428 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6429 }
   6430 
   6431 static void
   6432 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6433 {
   6434 	struct wm_softc *sc = ifp->if_softc;
   6435 	struct wm_txsoft *txs;
   6436 	int i, qidx;
   6437 
   6438 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6439 		device_xname(sc->sc_dev), __func__));
   6440 	KASSERT(WM_CORE_LOCKED(sc));
   6441 
   6442 	wm_set_stopping_flags(sc);
   6443 
   6444 	if (sc->sc_flags & WM_F_HAS_MII) {
   6445 		/* Down the MII. */
   6446 		mii_down(&sc->sc_mii);
   6447 	} else {
   6448 #if 0
   6449 		/* Should we clear PHY's status properly? */
   6450 		wm_reset(sc);
   6451 #endif
   6452 	}
   6453 
   6454 	/* Stop the transmit and receive processes. */
   6455 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6456 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6457 	sc->sc_rctl &= ~RCTL_EN;
   6458 
   6459 	/*
   6460 	 * Clear the interrupt mask to ensure the device cannot assert its
   6461 	 * interrupt line.
   6462 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6463 	 * service any currently pending or shared interrupt.
   6464 	 */
   6465 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6466 	sc->sc_icr = 0;
   6467 	if (wm_is_using_msix(sc)) {
   6468 		if (sc->sc_type != WM_T_82574) {
   6469 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6470 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6471 		} else
   6472 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6473 	}
   6474 
   6475 	/*
   6476 	 * Stop callouts after interrupts are disabled; if we have
   6477 	 * to wait for them, we will be releasing the CORE_LOCK
   6478 	 * briefly, which will unblock interrupts on the current CPU.
   6479 	 */
   6480 
   6481 	/* Stop the one second clock. */
   6482 	if (wait)
   6483 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6484 	else
   6485 		callout_stop(&sc->sc_tick_ch);
   6486 
   6487 	/* Stop the 82547 Tx FIFO stall check timer. */
   6488 	if (sc->sc_type == WM_T_82547) {
   6489 		if (wait)
   6490 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6491 		else
   6492 			callout_stop(&sc->sc_txfifo_ch);
   6493 	}
   6494 
   6495 	/* Release any queued transmit buffers. */
   6496 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6497 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6498 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6499 		mutex_enter(txq->txq_lock);
   6500 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6501 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6502 			txs = &txq->txq_soft[i];
   6503 			if (txs->txs_mbuf != NULL) {
   6504 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6505 				m_freem(txs->txs_mbuf);
   6506 				txs->txs_mbuf = NULL;
   6507 			}
   6508 		}
   6509 		mutex_exit(txq->txq_lock);
   6510 	}
   6511 
   6512 	/* Mark the interface as down and cancel the watchdog timer. */
   6513 	ifp->if_flags &= ~IFF_RUNNING;
   6514 
   6515 	if (disable) {
   6516 		for (i = 0; i < sc->sc_nqueues; i++) {
   6517 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6518 			mutex_enter(rxq->rxq_lock);
   6519 			wm_rxdrain(rxq);
   6520 			mutex_exit(rxq->rxq_lock);
   6521 		}
   6522 	}
   6523 
   6524 #if 0 /* notyet */
   6525 	if (sc->sc_type >= WM_T_82544)
   6526 		CSR_WRITE(sc, WMREG_WUC, 0);
   6527 #endif
   6528 }
   6529 
   6530 static void
   6531 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6532 {
   6533 	struct mbuf *m;
   6534 	int i;
   6535 
   6536 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6537 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6538 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6539 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6540 		    m->m_data, m->m_len, m->m_flags);
   6541 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6542 	    i, i == 1 ? "" : "s");
   6543 }
   6544 
   6545 /*
   6546  * wm_82547_txfifo_stall:
   6547  *
   6548  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6549  *	reset the FIFO pointers, and restart packet transmission.
   6550  */
   6551 static void
   6552 wm_82547_txfifo_stall(void *arg)
   6553 {
   6554 	struct wm_softc *sc = arg;
   6555 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6556 
   6557 	mutex_enter(txq->txq_lock);
   6558 
   6559 	if (txq->txq_stopping)
   6560 		goto out;
   6561 
   6562 	if (txq->txq_fifo_stall) {
   6563 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6564 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6565 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6566 			/*
   6567 			 * Packets have drained.  Stop transmitter, reset
   6568 			 * FIFO pointers, restart transmitter, and kick
   6569 			 * the packet queue.
   6570 			 */
   6571 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6572 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6573 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6574 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6575 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6576 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6577 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6578 			CSR_WRITE_FLUSH(sc);
   6579 
   6580 			txq->txq_fifo_head = 0;
   6581 			txq->txq_fifo_stall = 0;
   6582 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6583 		} else {
   6584 			/*
   6585 			 * Still waiting for packets to drain; try again in
   6586 			 * another tick.
   6587 			 */
   6588 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6589 		}
   6590 	}
   6591 
   6592 out:
   6593 	mutex_exit(txq->txq_lock);
   6594 }
   6595 
   6596 /*
   6597  * wm_82547_txfifo_bugchk:
   6598  *
   6599  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6600  *	prevent enqueueing a packet that would wrap around the end
   6601  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   6602  *
   6603  *	We do this by checking the amount of space before the end
   6604  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6605  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6606  *	the internal FIFO pointers to the beginning, and restart
   6607  *	transmission on the interface.
   6608  */
   6609 #define	WM_FIFO_HDR		0x10
   6610 #define	WM_82547_PAD_LEN	0x3e0
   6611 static int
   6612 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6613 {
   6614 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6615 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6616 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6617 
   6618 	/* Just return if already stalled. */
   6619 	if (txq->txq_fifo_stall)
   6620 		return 1;
   6621 
   6622 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6623 		/* Stall only occurs in half-duplex mode. */
   6624 		goto send_packet;
   6625 	}
   6626 
   6627 	if (len >= WM_82547_PAD_LEN + space) {
   6628 		txq->txq_fifo_stall = 1;
   6629 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6630 		return 1;
   6631 	}
   6632 
   6633  send_packet:
   6634 	txq->txq_fifo_head += len;
   6635 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6636 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6637 
   6638 	return 0;
   6639 }
   6640 
   6641 static int
   6642 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6643 {
   6644 	int error;
   6645 
   6646 	/*
   6647 	 * Allocate the control data structures, and create and load the
   6648 	 * DMA map for it.
   6649 	 *
   6650 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6651 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6652 	 * both sets within the same 4G segment.
   6653 	 */
   6654 	if (sc->sc_type < WM_T_82544)
   6655 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6656 	else
   6657 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6658 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6659 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6660 	else
   6661 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6662 
   6663 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6664 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6665 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6666 		aprint_error_dev(sc->sc_dev,
   6667 		    "unable to allocate TX control data, error = %d\n",
   6668 		    error);
   6669 		goto fail_0;
   6670 	}
   6671 
   6672 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6673 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6674 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6675 		aprint_error_dev(sc->sc_dev,
   6676 		    "unable to map TX control data, error = %d\n", error);
   6677 		goto fail_1;
   6678 	}
   6679 
   6680 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6681 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6682 		aprint_error_dev(sc->sc_dev,
   6683 		    "unable to create TX control data DMA map, error = %d\n",
   6684 		    error);
   6685 		goto fail_2;
   6686 	}
   6687 
   6688 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6689 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6690 		aprint_error_dev(sc->sc_dev,
   6691 		    "unable to load TX control data DMA map, error = %d\n",
   6692 		    error);
   6693 		goto fail_3;
   6694 	}
   6695 
   6696 	return 0;
   6697 
   6698  fail_3:
   6699 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6700  fail_2:
   6701 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6702 	    WM_TXDESCS_SIZE(txq));
   6703  fail_1:
   6704 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6705  fail_0:
   6706 	return error;
   6707 }
   6708 
   6709 static void
   6710 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6711 {
   6712 
   6713 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6714 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6715 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6716 	    WM_TXDESCS_SIZE(txq));
   6717 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6718 }
   6719 
   6720 static int
   6721 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6722 {
   6723 	int error;
   6724 	size_t rxq_descs_size;
   6725 
   6726 	/*
   6727 	 * Allocate the control data structures, and create and load the
   6728 	 * DMA map for it.
   6729 	 *
   6730 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6731 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6732 	 * both sets within the same 4G segment.
   6733 	 */
   6734 	rxq->rxq_ndesc = WM_NRXDESC;
   6735 	if (sc->sc_type == WM_T_82574)
   6736 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6737 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6738 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6739 	else
   6740 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6741 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6742 
   6743 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6744 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6745 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6746 		aprint_error_dev(sc->sc_dev,
   6747 		    "unable to allocate RX control data, error = %d\n",
   6748 		    error);
   6749 		goto fail_0;
   6750 	}
   6751 
   6752 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6753 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6754 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6755 		aprint_error_dev(sc->sc_dev,
   6756 		    "unable to map RX control data, error = %d\n", error);
   6757 		goto fail_1;
   6758 	}
   6759 
   6760 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6761 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6762 		aprint_error_dev(sc->sc_dev,
   6763 		    "unable to create RX control data DMA map, error = %d\n",
   6764 		    error);
   6765 		goto fail_2;
   6766 	}
   6767 
   6768 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6769 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6770 		aprint_error_dev(sc->sc_dev,
   6771 		    "unable to load RX control data DMA map, error = %d\n",
   6772 		    error);
   6773 		goto fail_3;
   6774 	}
   6775 
   6776 	return 0;
   6777 
   6778  fail_3:
   6779 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6780  fail_2:
   6781 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6782 	    rxq_descs_size);
   6783  fail_1:
   6784 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6785  fail_0:
   6786 	return error;
   6787 }
   6788 
   6789 static void
   6790 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6791 {
   6792 
   6793 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6794 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6795 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6796 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6797 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6798 }
   6799 
   6800 
   6801 static int
   6802 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6803 {
   6804 	int i, error;
   6805 
   6806 	/* Create the transmit buffer DMA maps. */
   6807 	WM_TXQUEUELEN(txq) =
   6808 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6809 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6810 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6811 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6812 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6813 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6814 			aprint_error_dev(sc->sc_dev,
   6815 			    "unable to create Tx DMA map %d, error = %d\n",
   6816 			    i, error);
   6817 			goto fail;
   6818 		}
   6819 	}
   6820 
   6821 	return 0;
   6822 
   6823  fail:
   6824 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6825 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6826 			bus_dmamap_destroy(sc->sc_dmat,
   6827 			    txq->txq_soft[i].txs_dmamap);
   6828 	}
   6829 	return error;
   6830 }
   6831 
   6832 static void
   6833 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6834 {
   6835 	int i;
   6836 
   6837 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6838 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6839 			bus_dmamap_destroy(sc->sc_dmat,
   6840 			    txq->txq_soft[i].txs_dmamap);
   6841 	}
   6842 }
   6843 
   6844 static int
   6845 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6846 {
   6847 	int i, error;
   6848 
   6849 	/* Create the receive buffer DMA maps. */
   6850 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6851 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6852 			    MCLBYTES, 0, 0,
   6853 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6854 			aprint_error_dev(sc->sc_dev,
   6855 			    "unable to create Rx DMA map %d error = %d\n",
   6856 			    i, error);
   6857 			goto fail;
   6858 		}
   6859 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6860 	}
   6861 
   6862 	return 0;
   6863 
   6864  fail:
   6865 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6866 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6867 			bus_dmamap_destroy(sc->sc_dmat,
   6868 			    rxq->rxq_soft[i].rxs_dmamap);
   6869 	}
   6870 	return error;
   6871 }
   6872 
   6873 static void
   6874 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6875 {
   6876 	int i;
   6877 
   6878 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6879 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6880 			bus_dmamap_destroy(sc->sc_dmat,
   6881 			    rxq->rxq_soft[i].rxs_dmamap);
   6882 	}
   6883 }
   6884 
   6885 /*
   6886  * wm_alloc_quques:
   6887  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6888  */
   6889 static int
   6890 wm_alloc_txrx_queues(struct wm_softc *sc)
   6891 {
   6892 	int i, error, tx_done, rx_done;
   6893 
   6894 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6895 	    KM_SLEEP);
   6896 	if (sc->sc_queue == NULL) {
   6897 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6898 		error = ENOMEM;
   6899 		goto fail_0;
   6900 	}
   6901 
   6902 	/* For transmission */
   6903 	error = 0;
   6904 	tx_done = 0;
   6905 	for (i = 0; i < sc->sc_nqueues; i++) {
   6906 #ifdef WM_EVENT_COUNTERS
   6907 		int j;
   6908 		const char *xname;
   6909 #endif
   6910 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6911 		txq->txq_sc = sc;
   6912 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6913 
   6914 		error = wm_alloc_tx_descs(sc, txq);
   6915 		if (error)
   6916 			break;
   6917 		error = wm_alloc_tx_buffer(sc, txq);
   6918 		if (error) {
   6919 			wm_free_tx_descs(sc, txq);
   6920 			break;
   6921 		}
   6922 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6923 		if (txq->txq_interq == NULL) {
   6924 			wm_free_tx_descs(sc, txq);
   6925 			wm_free_tx_buffer(sc, txq);
   6926 			error = ENOMEM;
   6927 			break;
   6928 		}
   6929 
   6930 #ifdef WM_EVENT_COUNTERS
   6931 		xname = device_xname(sc->sc_dev);
   6932 
   6933 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6934 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6935 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6936 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6937 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6938 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6939 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6940 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6941 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6942 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6943 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6944 
   6945 		for (j = 0; j < WM_NTXSEGS; j++) {
   6946 			snprintf(txq->txq_txseg_evcnt_names[j],
   6947 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6948 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6949 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6950 		}
   6951 
   6952 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6953 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6954 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6955 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6956 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6957 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   6958 #endif /* WM_EVENT_COUNTERS */
   6959 
   6960 		tx_done++;
   6961 	}
   6962 	if (error)
   6963 		goto fail_1;
   6964 
   6965 	/* For receive */
   6966 	error = 0;
   6967 	rx_done = 0;
   6968 	for (i = 0; i < sc->sc_nqueues; i++) {
   6969 #ifdef WM_EVENT_COUNTERS
   6970 		const char *xname;
   6971 #endif
   6972 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6973 		rxq->rxq_sc = sc;
   6974 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6975 
   6976 		error = wm_alloc_rx_descs(sc, rxq);
   6977 		if (error)
   6978 			break;
   6979 
   6980 		error = wm_alloc_rx_buffer(sc, rxq);
   6981 		if (error) {
   6982 			wm_free_rx_descs(sc, rxq);
   6983 			break;
   6984 		}
   6985 
   6986 #ifdef WM_EVENT_COUNTERS
   6987 		xname = device_xname(sc->sc_dev);
   6988 
   6989 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6990 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6991 
   6992 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6993 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6994 #endif /* WM_EVENT_COUNTERS */
   6995 
   6996 		rx_done++;
   6997 	}
   6998 	if (error)
   6999 		goto fail_2;
   7000 
   7001 	for (i = 0; i < sc->sc_nqueues; i++) {
   7002 		char rndname[16];
   7003 
   7004 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   7005 		    device_xname(sc->sc_dev), i);
   7006 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   7007 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   7008 	}
   7009 
   7010 	return 0;
   7011 
   7012  fail_2:
   7013 	for (i = 0; i < rx_done; i++) {
   7014 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7015 		wm_free_rx_buffer(sc, rxq);
   7016 		wm_free_rx_descs(sc, rxq);
   7017 		if (rxq->rxq_lock)
   7018 			mutex_obj_free(rxq->rxq_lock);
   7019 	}
   7020  fail_1:
   7021 	for (i = 0; i < tx_done; i++) {
   7022 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7023 		pcq_destroy(txq->txq_interq);
   7024 		wm_free_tx_buffer(sc, txq);
   7025 		wm_free_tx_descs(sc, txq);
   7026 		if (txq->txq_lock)
   7027 			mutex_obj_free(txq->txq_lock);
   7028 	}
   7029 
   7030 	kmem_free(sc->sc_queue,
   7031 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7032  fail_0:
   7033 	return error;
   7034 }
   7035 
   7036 /*
   7037  * wm_free_quques:
   7038  *	Free {tx,rx}descs and {tx,rx} buffers
   7039  */
   7040 static void
   7041 wm_free_txrx_queues(struct wm_softc *sc)
   7042 {
   7043 	int i;
   7044 
   7045 	for (i = 0; i < sc->sc_nqueues; i++)
   7046 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   7047 
   7048 	for (i = 0; i < sc->sc_nqueues; i++) {
   7049 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7050 
   7051 #ifdef WM_EVENT_COUNTERS
   7052 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7053 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7054 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7055 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7056 #endif /* WM_EVENT_COUNTERS */
   7057 
   7058 		wm_free_rx_buffer(sc, rxq);
   7059 		wm_free_rx_descs(sc, rxq);
   7060 		if (rxq->rxq_lock)
   7061 			mutex_obj_free(rxq->rxq_lock);
   7062 	}
   7063 
   7064 	for (i = 0; i < sc->sc_nqueues; i++) {
   7065 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7066 		struct mbuf *m;
   7067 #ifdef WM_EVENT_COUNTERS
   7068 		int j;
   7069 
   7070 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7071 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7072 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7073 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7074 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7075 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7076 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7077 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7078 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7079 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7080 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7081 
   7082 		for (j = 0; j < WM_NTXSEGS; j++)
   7083 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7084 
   7085 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7086 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7087 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7088 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7089 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7090 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7091 #endif /* WM_EVENT_COUNTERS */
   7092 
   7093 		/* Drain txq_interq */
   7094 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7095 			m_freem(m);
   7096 		pcq_destroy(txq->txq_interq);
   7097 
   7098 		wm_free_tx_buffer(sc, txq);
   7099 		wm_free_tx_descs(sc, txq);
   7100 		if (txq->txq_lock)
   7101 			mutex_obj_free(txq->txq_lock);
   7102 	}
   7103 
   7104 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7105 }
   7106 
   7107 static void
   7108 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7109 {
   7110 
   7111 	KASSERT(mutex_owned(txq->txq_lock));
   7112 
   7113 	/* Initialize the transmit descriptor ring. */
   7114 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7115 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7116 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7117 	txq->txq_free = WM_NTXDESC(txq);
   7118 	txq->txq_next = 0;
   7119 }
   7120 
   7121 static void
   7122 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7123     struct wm_txqueue *txq)
   7124 {
   7125 
   7126 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7127 		device_xname(sc->sc_dev), __func__));
   7128 	KASSERT(mutex_owned(txq->txq_lock));
   7129 
   7130 	if (sc->sc_type < WM_T_82543) {
   7131 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7132 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7133 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7134 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7135 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7136 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7137 	} else {
   7138 		int qid = wmq->wmq_id;
   7139 
   7140 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7141 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7142 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7143 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7144 
   7145 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7146 			/*
   7147 			 * Don't write TDT before TCTL.EN is set.
   7148 			 * See the document.
   7149 			 */
   7150 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7151 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7152 			    | TXDCTL_WTHRESH(0));
   7153 		else {
   7154 			/* XXX should update with AIM? */
   7155 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7156 			if (sc->sc_type >= WM_T_82540) {
   7157 				/* Should be the same */
   7158 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7159 			}
   7160 
   7161 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7162 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7163 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7164 		}
   7165 	}
   7166 }
   7167 
   7168 static void
   7169 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7170 {
   7171 	int i;
   7172 
   7173 	KASSERT(mutex_owned(txq->txq_lock));
   7174 
   7175 	/* Initialize the transmit job descriptors. */
   7176 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7177 		txq->txq_soft[i].txs_mbuf = NULL;
   7178 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7179 	txq->txq_snext = 0;
   7180 	txq->txq_sdirty = 0;
   7181 }
   7182 
   7183 static void
   7184 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7185     struct wm_txqueue *txq)
   7186 {
   7187 
   7188 	KASSERT(mutex_owned(txq->txq_lock));
   7189 
   7190 	/*
   7191 	 * Set up some register offsets that are different between
   7192 	 * the i82542 and the i82543 and later chips.
   7193 	 */
   7194 	if (sc->sc_type < WM_T_82543)
   7195 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7196 	else
   7197 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7198 
   7199 	wm_init_tx_descs(sc, txq);
   7200 	wm_init_tx_regs(sc, wmq, txq);
   7201 	wm_init_tx_buffer(sc, txq);
   7202 
   7203 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7204 	txq->txq_sending = false;
   7205 }
   7206 
   7207 static void
   7208 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7209     struct wm_rxqueue *rxq)
   7210 {
   7211 
   7212 	KASSERT(mutex_owned(rxq->rxq_lock));
   7213 
   7214 	/*
   7215 	 * Initialize the receive descriptor and receive job
   7216 	 * descriptor rings.
   7217 	 */
   7218 	if (sc->sc_type < WM_T_82543) {
   7219 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7220 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7221 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7222 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7223 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7224 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7225 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7226 
   7227 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7228 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7229 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7230 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7231 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7232 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7233 	} else {
   7234 		int qid = wmq->wmq_id;
   7235 
   7236 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7237 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7238 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7239 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7240 
   7241 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7242 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7243 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7244 
   7245 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   7246 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7247 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7248 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7249 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7250 			    | RXDCTL_WTHRESH(1));
   7251 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7252 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7253 		} else {
   7254 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7255 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7256 			/* XXX should update with AIM? */
   7257 			CSR_WRITE(sc, WMREG_RDTR,
   7258 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7259 			/* MUST be same */
   7260 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7261 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7262 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7263 		}
   7264 	}
   7265 }
   7266 
   7267 static int
   7268 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7269 {
   7270 	struct wm_rxsoft *rxs;
   7271 	int error, i;
   7272 
   7273 	KASSERT(mutex_owned(rxq->rxq_lock));
   7274 
   7275 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7276 		rxs = &rxq->rxq_soft[i];
   7277 		if (rxs->rxs_mbuf == NULL) {
   7278 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7279 				log(LOG_ERR, "%s: unable to allocate or map "
   7280 				    "rx buffer %d, error = %d\n",
   7281 				    device_xname(sc->sc_dev), i, error);
   7282 				/*
   7283 				 * XXX Should attempt to run with fewer receive
   7284 				 * XXX buffers instead of just failing.
   7285 				 */
   7286 				wm_rxdrain(rxq);
   7287 				return ENOMEM;
   7288 			}
   7289 		} else {
   7290 			/*
   7291 			 * For 82575 and 82576, the RX descriptors must be
   7292 			 * initialized after the setting of RCTL.EN in
   7293 			 * wm_set_filter()
   7294 			 */
   7295 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7296 				wm_init_rxdesc(rxq, i);
   7297 		}
   7298 	}
   7299 	rxq->rxq_ptr = 0;
   7300 	rxq->rxq_discard = 0;
   7301 	WM_RXCHAIN_RESET(rxq);
   7302 
   7303 	return 0;
   7304 }
   7305 
   7306 static int
   7307 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7308     struct wm_rxqueue *rxq)
   7309 {
   7310 
   7311 	KASSERT(mutex_owned(rxq->rxq_lock));
   7312 
   7313 	/*
   7314 	 * Set up some register offsets that are different between
   7315 	 * the i82542 and the i82543 and later chips.
   7316 	 */
   7317 	if (sc->sc_type < WM_T_82543)
   7318 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7319 	else
   7320 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7321 
   7322 	wm_init_rx_regs(sc, wmq, rxq);
   7323 	return wm_init_rx_buffer(sc, rxq);
   7324 }
   7325 
   7326 /*
   7327  * wm_init_quques:
   7328  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7329  */
   7330 static int
   7331 wm_init_txrx_queues(struct wm_softc *sc)
   7332 {
   7333 	int i, error = 0;
   7334 
   7335 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7336 		device_xname(sc->sc_dev), __func__));
   7337 
   7338 	for (i = 0; i < sc->sc_nqueues; i++) {
   7339 		struct wm_queue *wmq = &sc->sc_queue[i];
   7340 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7341 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7342 
   7343 		/*
   7344 		 * TODO
   7345 		 * Currently, use constant variable instead of AIM.
   7346 		 * Furthermore, the interrupt interval of multiqueue which use
   7347 		 * polling mode is less than default value.
   7348 		 * More tuning and AIM are required.
   7349 		 */
   7350 		if (wm_is_using_multiqueue(sc))
   7351 			wmq->wmq_itr = 50;
   7352 		else
   7353 			wmq->wmq_itr = sc->sc_itr_init;
   7354 		wmq->wmq_set_itr = true;
   7355 
   7356 		mutex_enter(txq->txq_lock);
   7357 		wm_init_tx_queue(sc, wmq, txq);
   7358 		mutex_exit(txq->txq_lock);
   7359 
   7360 		mutex_enter(rxq->rxq_lock);
   7361 		error = wm_init_rx_queue(sc, wmq, rxq);
   7362 		mutex_exit(rxq->rxq_lock);
   7363 		if (error)
   7364 			break;
   7365 	}
   7366 
   7367 	return error;
   7368 }
   7369 
   7370 /*
   7371  * wm_tx_offload:
   7372  *
   7373  *	Set up TCP/IP checksumming parameters for the
   7374  *	specified packet.
   7375  */
   7376 static void
   7377 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7378     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7379 {
   7380 	struct mbuf *m0 = txs->txs_mbuf;
   7381 	struct livengood_tcpip_ctxdesc *t;
   7382 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7383 	uint32_t ipcse;
   7384 	struct ether_header *eh;
   7385 	int offset, iphl;
   7386 	uint8_t fields;
   7387 
   7388 	/*
   7389 	 * XXX It would be nice if the mbuf pkthdr had offset
   7390 	 * fields for the protocol headers.
   7391 	 */
   7392 
   7393 	eh = mtod(m0, struct ether_header *);
   7394 	switch (htons(eh->ether_type)) {
   7395 	case ETHERTYPE_IP:
   7396 	case ETHERTYPE_IPV6:
   7397 		offset = ETHER_HDR_LEN;
   7398 		break;
   7399 
   7400 	case ETHERTYPE_VLAN:
   7401 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7402 		break;
   7403 
   7404 	default:
   7405 		/* Don't support this protocol or encapsulation. */
   7406  		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7407  		txq->txq_last_hw_ipcs = 0;
   7408  		txq->txq_last_hw_tucs = 0;
   7409 		*fieldsp = 0;
   7410 		*cmdp = 0;
   7411 		return;
   7412 	}
   7413 
   7414 	if ((m0->m_pkthdr.csum_flags &
   7415 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7416 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7417 	} else
   7418 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7419 
   7420 	ipcse = offset + iphl - 1;
   7421 
   7422 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7423 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7424 	seg = 0;
   7425 	fields = 0;
   7426 
   7427 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7428 		int hlen = offset + iphl;
   7429 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7430 
   7431 		if (__predict_false(m0->m_len <
   7432 				    (hlen + sizeof(struct tcphdr)))) {
   7433 			/*
   7434 			 * TCP/IP headers are not in the first mbuf; we need
   7435 			 * to do this the slow and painful way. Let's just
   7436 			 * hope this doesn't happen very often.
   7437 			 */
   7438 			struct tcphdr th;
   7439 
   7440 			WM_Q_EVCNT_INCR(txq, tsopain);
   7441 
   7442 			m_copydata(m0, hlen, sizeof(th), &th);
   7443 			if (v4) {
   7444 				struct ip ip;
   7445 
   7446 				m_copydata(m0, offset, sizeof(ip), &ip);
   7447 				ip.ip_len = 0;
   7448 				m_copyback(m0,
   7449 				    offset + offsetof(struct ip, ip_len),
   7450 				    sizeof(ip.ip_len), &ip.ip_len);
   7451 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7452 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7453 			} else {
   7454 				struct ip6_hdr ip6;
   7455 
   7456 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7457 				ip6.ip6_plen = 0;
   7458 				m_copyback(m0,
   7459 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7460 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7461 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7462 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7463 			}
   7464 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7465 			    sizeof(th.th_sum), &th.th_sum);
   7466 
   7467 			hlen += th.th_off << 2;
   7468 		} else {
   7469 			/*
   7470 			 * TCP/IP headers are in the first mbuf; we can do
   7471 			 * this the easy way.
   7472 			 */
   7473 			struct tcphdr *th;
   7474 
   7475 			if (v4) {
   7476 				struct ip *ip =
   7477 				    (void *)(mtod(m0, char *) + offset);
   7478 				th = (void *)(mtod(m0, char *) + hlen);
   7479 
   7480 				ip->ip_len = 0;
   7481 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7482 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7483 			} else {
   7484 				struct ip6_hdr *ip6 =
   7485 				    (void *)(mtod(m0, char *) + offset);
   7486 				th = (void *)(mtod(m0, char *) + hlen);
   7487 
   7488 				ip6->ip6_plen = 0;
   7489 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7490 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7491 			}
   7492 			hlen += th->th_off << 2;
   7493 		}
   7494 
   7495 		if (v4) {
   7496 			WM_Q_EVCNT_INCR(txq, tso);
   7497 			cmdlen |= WTX_TCPIP_CMD_IP;
   7498 		} else {
   7499 			WM_Q_EVCNT_INCR(txq, tso6);
   7500 			ipcse = 0;
   7501 		}
   7502 		cmd |= WTX_TCPIP_CMD_TSE;
   7503 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7504 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7505 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7506 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7507 	}
   7508 
   7509 	/*
   7510 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7511 	 * offload feature, if we load the context descriptor, we
   7512 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7513 	 */
   7514 
   7515 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7516 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7517 	    WTX_TCPIP_IPCSE(ipcse);
   7518 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7519 		WM_Q_EVCNT_INCR(txq, ipsum);
   7520 		fields |= WTX_IXSM;
   7521 	}
   7522 
   7523 	offset += iphl;
   7524 
   7525 	if (m0->m_pkthdr.csum_flags &
   7526 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7527 		WM_Q_EVCNT_INCR(txq, tusum);
   7528 		fields |= WTX_TXSM;
   7529 		tucs = WTX_TCPIP_TUCSS(offset) |
   7530 		    WTX_TCPIP_TUCSO(offset +
   7531 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7532 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7533 	} else if ((m0->m_pkthdr.csum_flags &
   7534 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7535 		WM_Q_EVCNT_INCR(txq, tusum6);
   7536 		fields |= WTX_TXSM;
   7537 		tucs = WTX_TCPIP_TUCSS(offset) |
   7538 		    WTX_TCPIP_TUCSO(offset +
   7539 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7540 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7541 	} else {
   7542 		/* Just initialize it to a valid TCP context. */
   7543 		tucs = WTX_TCPIP_TUCSS(offset) |
   7544 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7545 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7546 	}
   7547 
   7548 	*cmdp = cmd;
   7549 	*fieldsp = fields;
   7550 
   7551 	/*
   7552 	 * We don't have to write context descriptor for every packet
   7553 	 * except for 82574. For 82574, we must write context descriptor
   7554 	 * for every packet when we use two descriptor queues.
   7555 	 *
   7556 	 * The 82574L can only remember the *last* context used
   7557 	 * regardless of queue that it was use for.  We cannot reuse
   7558 	 * contexts on this hardware platform and must generate a new
   7559 	 * context every time.  82574L hardware spec, section 7.2.6,
   7560 	 * second note.
   7561  	 *
   7562   	 * Setting up new checksum offload context for every
   7563 	 * frames takes a lot of processing time for hardware.
   7564 	 * This also reduces performance a lot for small sized
   7565 	 * frames so avoid it if driver can use previously
   7566 	 * configured checksum offload context.
   7567 	 * For TSO, in theory we can use the same TSO context if and only if
   7568 	 * frame is the same type(IP/TCP) and the same MSS. However
   7569 	 * checking whether a frame has the same IP/TCP structure is
   7570 	 * hard thing so just ignore that and always restablish a
   7571 	 * new TSO context.
   7572   	 */
   7573 	KASSERT(!wm_is_using_multiqueue(sc));
   7574 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) == 0) {
   7575 		if (txq->txq_last_hw_cmd == cmd &&
   7576 		    txq->txq_last_hw_fields == fields &&
   7577 		    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7578 		    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7579 			WM_Q_EVCNT_INCR(txq, skipcontext);
   7580 			return;
   7581 		}
   7582 	}
   7583 
   7584  	txq->txq_last_hw_cmd = cmd;
   7585  	txq->txq_last_hw_fields = fields;
   7586  	txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7587 	txq->txq_last_hw_tucs = (tucs & 0xffff);
   7588 
   7589 	/* Fill in the context descriptor. */
   7590 	t = (struct livengood_tcpip_ctxdesc *)
   7591 	    &txq->txq_descs[txq->txq_next];
   7592 	t->tcpip_ipcs = htole32(ipcs);
   7593 	t->tcpip_tucs = htole32(tucs);
   7594 	t->tcpip_cmdlen = htole32(cmdlen);
   7595 	t->tcpip_seg = htole32(seg);
   7596 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7597 
   7598 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7599 	txs->txs_ndesc++;
   7600 }
   7601 
   7602 static inline int
   7603 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7604 {
   7605 	struct wm_softc *sc = ifp->if_softc;
   7606 	u_int cpuid = cpu_index(curcpu());
   7607 
   7608 	/*
   7609 	 * Currently, simple distribute strategy.
   7610 	 * TODO:
   7611 	 * distribute by flowid(RSS has value).
   7612 	 */
   7613 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7614 }
   7615 
   7616 /*
   7617  * wm_start:		[ifnet interface function]
   7618  *
   7619  *	Start packet transmission on the interface.
   7620  */
   7621 static void
   7622 wm_start(struct ifnet *ifp)
   7623 {
   7624 	struct wm_softc *sc = ifp->if_softc;
   7625 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7626 
   7627 #ifdef WM_MPSAFE
   7628 	KASSERT(if_is_mpsafe(ifp));
   7629 #endif
   7630 	/*
   7631 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7632 	 */
   7633 
   7634 	mutex_enter(txq->txq_lock);
   7635 	if (!txq->txq_stopping)
   7636 		wm_start_locked(ifp);
   7637 	mutex_exit(txq->txq_lock);
   7638 }
   7639 
   7640 static void
   7641 wm_start_locked(struct ifnet *ifp)
   7642 {
   7643 	struct wm_softc *sc = ifp->if_softc;
   7644 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7645 
   7646 	wm_send_common_locked(ifp, txq, false);
   7647 }
   7648 
   7649 static int
   7650 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7651 {
   7652 	int qid;
   7653 	struct wm_softc *sc = ifp->if_softc;
   7654 	struct wm_txqueue *txq;
   7655 
   7656 	qid = wm_select_txqueue(ifp, m);
   7657 	txq = &sc->sc_queue[qid].wmq_txq;
   7658 
   7659 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7660 		m_freem(m);
   7661 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7662 		return ENOBUFS;
   7663 	}
   7664 
   7665 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7666 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7667 	if (m->m_flags & M_MCAST)
   7668 		if_statinc_ref(nsr, if_omcasts);
   7669 	IF_STAT_PUTREF(ifp);
   7670 
   7671 	if (mutex_tryenter(txq->txq_lock)) {
   7672 		if (!txq->txq_stopping)
   7673 			wm_transmit_locked(ifp, txq);
   7674 		mutex_exit(txq->txq_lock);
   7675 	}
   7676 
   7677 	return 0;
   7678 }
   7679 
   7680 static void
   7681 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7682 {
   7683 
   7684 	wm_send_common_locked(ifp, txq, true);
   7685 }
   7686 
   7687 static void
   7688 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7689     bool is_transmit)
   7690 {
   7691 	struct wm_softc *sc = ifp->if_softc;
   7692 	struct mbuf *m0;
   7693 	struct wm_txsoft *txs;
   7694 	bus_dmamap_t dmamap;
   7695 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7696 	bus_addr_t curaddr;
   7697 	bus_size_t seglen, curlen;
   7698 	uint32_t cksumcmd;
   7699 	uint8_t cksumfields;
   7700 	bool remap = true;
   7701 
   7702 	KASSERT(mutex_owned(txq->txq_lock));
   7703 
   7704 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7705 		return;
   7706 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7707 		return;
   7708 
   7709 	/* Remember the previous number of free descriptors. */
   7710 	ofree = txq->txq_free;
   7711 
   7712 	/*
   7713 	 * Loop through the send queue, setting up transmit descriptors
   7714 	 * until we drain the queue, or use up all available transmit
   7715 	 * descriptors.
   7716 	 */
   7717 	for (;;) {
   7718 		m0 = NULL;
   7719 
   7720 		/* Get a work queue entry. */
   7721 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7722 			wm_txeof(txq, UINT_MAX);
   7723 			if (txq->txq_sfree == 0) {
   7724 				DPRINTF(WM_DEBUG_TX,
   7725 				    ("%s: TX: no free job descriptors\n",
   7726 					device_xname(sc->sc_dev)));
   7727 				WM_Q_EVCNT_INCR(txq, txsstall);
   7728 				break;
   7729 			}
   7730 		}
   7731 
   7732 		/* Grab a packet off the queue. */
   7733 		if (is_transmit)
   7734 			m0 = pcq_get(txq->txq_interq);
   7735 		else
   7736 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7737 		if (m0 == NULL)
   7738 			break;
   7739 
   7740 		DPRINTF(WM_DEBUG_TX,
   7741 		    ("%s: TX: have packet to transmit: %p\n",
   7742 			device_xname(sc->sc_dev), m0));
   7743 
   7744 		txs = &txq->txq_soft[txq->txq_snext];
   7745 		dmamap = txs->txs_dmamap;
   7746 
   7747 		use_tso = (m0->m_pkthdr.csum_flags &
   7748 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7749 
   7750 		/*
   7751 		 * So says the Linux driver:
   7752 		 * The controller does a simple calculation to make sure
   7753 		 * there is enough room in the FIFO before initiating the
   7754 		 * DMA for each buffer. The calc is:
   7755 		 *	4 = ceil(buffer len / MSS)
   7756 		 * To make sure we don't overrun the FIFO, adjust the max
   7757 		 * buffer len if the MSS drops.
   7758 		 */
   7759 		dmamap->dm_maxsegsz =
   7760 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7761 		    ? m0->m_pkthdr.segsz << 2
   7762 		    : WTX_MAX_LEN;
   7763 
   7764 		/*
   7765 		 * Load the DMA map.  If this fails, the packet either
   7766 		 * didn't fit in the allotted number of segments, or we
   7767 		 * were short on resources.  For the too-many-segments
   7768 		 * case, we simply report an error and drop the packet,
   7769 		 * since we can't sanely copy a jumbo packet to a single
   7770 		 * buffer.
   7771 		 */
   7772 retry:
   7773 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7774 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7775 		if (__predict_false(error)) {
   7776 			if (error == EFBIG) {
   7777 				if (remap == true) {
   7778 					struct mbuf *m;
   7779 
   7780 					remap = false;
   7781 					m = m_defrag(m0, M_NOWAIT);
   7782 					if (m != NULL) {
   7783 						WM_Q_EVCNT_INCR(txq, defrag);
   7784 						m0 = m;
   7785 						goto retry;
   7786 					}
   7787 				}
   7788 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7789 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7790 				    "DMA segments, dropping...\n",
   7791 				    device_xname(sc->sc_dev));
   7792 				wm_dump_mbuf_chain(sc, m0);
   7793 				m_freem(m0);
   7794 				continue;
   7795 			}
   7796 			/* Short on resources, just stop for now. */
   7797 			DPRINTF(WM_DEBUG_TX,
   7798 			    ("%s: TX: dmamap load failed: %d\n",
   7799 				device_xname(sc->sc_dev), error));
   7800 			break;
   7801 		}
   7802 
   7803 		segs_needed = dmamap->dm_nsegs;
   7804 		if (use_tso) {
   7805 			/* For sentinel descriptor; see below. */
   7806 			segs_needed++;
   7807 		}
   7808 
   7809 		/*
   7810 		 * Ensure we have enough descriptors free to describe
   7811 		 * the packet. Note, we always reserve one descriptor
   7812 		 * at the end of the ring due to the semantics of the
   7813 		 * TDT register, plus one more in the event we need
   7814 		 * to load offload context.
   7815 		 */
   7816 		if (segs_needed > txq->txq_free - 2) {
   7817 			/*
   7818 			 * Not enough free descriptors to transmit this
   7819 			 * packet.  We haven't committed anything yet,
   7820 			 * so just unload the DMA map, put the packet
   7821 			 * pack on the queue, and punt. Notify the upper
   7822 			 * layer that there are no more slots left.
   7823 			 */
   7824 			DPRINTF(WM_DEBUG_TX,
   7825 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7826 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7827 				segs_needed, txq->txq_free - 1));
   7828 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7829 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7830 			WM_Q_EVCNT_INCR(txq, txdstall);
   7831 			break;
   7832 		}
   7833 
   7834 		/*
   7835 		 * Check for 82547 Tx FIFO bug. We need to do this
   7836 		 * once we know we can transmit the packet, since we
   7837 		 * do some internal FIFO space accounting here.
   7838 		 */
   7839 		if (sc->sc_type == WM_T_82547 &&
   7840 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7841 			DPRINTF(WM_DEBUG_TX,
   7842 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7843 				device_xname(sc->sc_dev)));
   7844 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7845 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7846 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7847 			break;
   7848 		}
   7849 
   7850 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7851 
   7852 		DPRINTF(WM_DEBUG_TX,
   7853 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7854 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7855 
   7856 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7857 
   7858 		/*
   7859 		 * Store a pointer to the packet so that we can free it
   7860 		 * later.
   7861 		 *
   7862 		 * Initially, we consider the number of descriptors the
   7863 		 * packet uses the number of DMA segments.  This may be
   7864 		 * incremented by 1 if we do checksum offload (a descriptor
   7865 		 * is used to set the checksum context).
   7866 		 */
   7867 		txs->txs_mbuf = m0;
   7868 		txs->txs_firstdesc = txq->txq_next;
   7869 		txs->txs_ndesc = segs_needed;
   7870 
   7871 		/* Set up offload parameters for this packet. */
   7872 		if (m0->m_pkthdr.csum_flags &
   7873 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7874 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7875 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7876 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   7877 		} else {
   7878  			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7879  			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   7880 			cksumcmd = 0;
   7881 			cksumfields = 0;
   7882 		}
   7883 
   7884 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7885 
   7886 		/* Sync the DMA map. */
   7887 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7888 		    BUS_DMASYNC_PREWRITE);
   7889 
   7890 		/* Initialize the transmit descriptor. */
   7891 		for (nexttx = txq->txq_next, seg = 0;
   7892 		     seg < dmamap->dm_nsegs; seg++) {
   7893 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7894 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7895 			     seglen != 0;
   7896 			     curaddr += curlen, seglen -= curlen,
   7897 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7898 				curlen = seglen;
   7899 
   7900 				/*
   7901 				 * So says the Linux driver:
   7902 				 * Work around for premature descriptor
   7903 				 * write-backs in TSO mode.  Append a
   7904 				 * 4-byte sentinel descriptor.
   7905 				 */
   7906 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7907 				    curlen > 8)
   7908 					curlen -= 4;
   7909 
   7910 				wm_set_dma_addr(
   7911 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7912 				txq->txq_descs[nexttx].wtx_cmdlen
   7913 				    = htole32(cksumcmd | curlen);
   7914 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7915 				    = 0;
   7916 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7917 				    = cksumfields;
   7918 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7919 				lasttx = nexttx;
   7920 
   7921 				DPRINTF(WM_DEBUG_TX,
   7922 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7923 					"len %#04zx\n",
   7924 					device_xname(sc->sc_dev), nexttx,
   7925 					(uint64_t)curaddr, curlen));
   7926 			}
   7927 		}
   7928 
   7929 		KASSERT(lasttx != -1);
   7930 
   7931 		/*
   7932 		 * Set up the command byte on the last descriptor of
   7933 		 * the packet. If we're in the interrupt delay window,
   7934 		 * delay the interrupt.
   7935 		 */
   7936 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7937 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7938 
   7939 		/*
   7940 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7941 		 * up the descriptor to encapsulate the packet for us.
   7942 		 *
   7943 		 * This is only valid on the last descriptor of the packet.
   7944 		 */
   7945 		if (vlan_has_tag(m0)) {
   7946 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7947 			    htole32(WTX_CMD_VLE);
   7948 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7949 			    = htole16(vlan_get_tag(m0));
   7950 		}
   7951 
   7952 		txs->txs_lastdesc = lasttx;
   7953 
   7954 		DPRINTF(WM_DEBUG_TX,
   7955 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7956 			device_xname(sc->sc_dev),
   7957 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7958 
   7959 		/* Sync the descriptors we're using. */
   7960 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7961 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7962 
   7963 		/* Give the packet to the chip. */
   7964 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7965 
   7966 		DPRINTF(WM_DEBUG_TX,
   7967 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7968 
   7969 		DPRINTF(WM_DEBUG_TX,
   7970 		    ("%s: TX: finished transmitting packet, job %d\n",
   7971 			device_xname(sc->sc_dev), txq->txq_snext));
   7972 
   7973 		/* Advance the tx pointer. */
   7974 		txq->txq_free -= txs->txs_ndesc;
   7975 		txq->txq_next = nexttx;
   7976 
   7977 		txq->txq_sfree--;
   7978 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7979 
   7980 		/* Pass the packet to any BPF listeners. */
   7981 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7982 	}
   7983 
   7984 	if (m0 != NULL) {
   7985 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7986 		WM_Q_EVCNT_INCR(txq, descdrop);
   7987 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7988 			__func__));
   7989 		m_freem(m0);
   7990 	}
   7991 
   7992 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7993 		/* No more slots; notify upper layer. */
   7994 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7995 	}
   7996 
   7997 	if (txq->txq_free != ofree) {
   7998 		/* Set a watchdog timer in case the chip flakes out. */
   7999 		txq->txq_lastsent = time_uptime;
   8000 		txq->txq_sending = true;
   8001 	}
   8002 }
   8003 
   8004 /*
   8005  * wm_nq_tx_offload:
   8006  *
   8007  *	Set up TCP/IP checksumming parameters for the
   8008  *	specified packet, for NEWQUEUE devices
   8009  */
   8010 static void
   8011 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8012     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8013 {
   8014 	struct mbuf *m0 = txs->txs_mbuf;
   8015 	uint32_t vl_len, mssidx, cmdc;
   8016 	struct ether_header *eh;
   8017 	int offset, iphl;
   8018 
   8019 	/*
   8020 	 * XXX It would be nice if the mbuf pkthdr had offset
   8021 	 * fields for the protocol headers.
   8022 	 */
   8023 	*cmdlenp = 0;
   8024 	*fieldsp = 0;
   8025 
   8026 	eh = mtod(m0, struct ether_header *);
   8027 	switch (htons(eh->ether_type)) {
   8028 	case ETHERTYPE_IP:
   8029 	case ETHERTYPE_IPV6:
   8030 		offset = ETHER_HDR_LEN;
   8031 		break;
   8032 
   8033 	case ETHERTYPE_VLAN:
   8034 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8035 		break;
   8036 
   8037 	default:
   8038 		/* Don't support this protocol or encapsulation. */
   8039 		*do_csum = false;
   8040 		return;
   8041 	}
   8042 	*do_csum = true;
   8043 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8044 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8045 
   8046 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8047 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8048 
   8049 	if ((m0->m_pkthdr.csum_flags &
   8050 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8051 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8052 	} else {
   8053 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8054 	}
   8055 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8056 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8057 
   8058 	if (vlan_has_tag(m0)) {
   8059 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8060 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8061 		*cmdlenp |= NQTX_CMD_VLE;
   8062 	}
   8063 
   8064 	mssidx = 0;
   8065 
   8066 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8067 		int hlen = offset + iphl;
   8068 		int tcp_hlen;
   8069 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8070 
   8071 		if (__predict_false(m0->m_len <
   8072 				    (hlen + sizeof(struct tcphdr)))) {
   8073 			/*
   8074 			 * TCP/IP headers are not in the first mbuf; we need
   8075 			 * to do this the slow and painful way. Let's just
   8076 			 * hope this doesn't happen very often.
   8077 			 */
   8078 			struct tcphdr th;
   8079 
   8080 			WM_Q_EVCNT_INCR(txq, tsopain);
   8081 
   8082 			m_copydata(m0, hlen, sizeof(th), &th);
   8083 			if (v4) {
   8084 				struct ip ip;
   8085 
   8086 				m_copydata(m0, offset, sizeof(ip), &ip);
   8087 				ip.ip_len = 0;
   8088 				m_copyback(m0,
   8089 				    offset + offsetof(struct ip, ip_len),
   8090 				    sizeof(ip.ip_len), &ip.ip_len);
   8091 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8092 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8093 			} else {
   8094 				struct ip6_hdr ip6;
   8095 
   8096 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8097 				ip6.ip6_plen = 0;
   8098 				m_copyback(m0,
   8099 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8100 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8101 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8102 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8103 			}
   8104 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8105 			    sizeof(th.th_sum), &th.th_sum);
   8106 
   8107 			tcp_hlen = th.th_off << 2;
   8108 		} else {
   8109 			/*
   8110 			 * TCP/IP headers are in the first mbuf; we can do
   8111 			 * this the easy way.
   8112 			 */
   8113 			struct tcphdr *th;
   8114 
   8115 			if (v4) {
   8116 				struct ip *ip =
   8117 				    (void *)(mtod(m0, char *) + offset);
   8118 				th = (void *)(mtod(m0, char *) + hlen);
   8119 
   8120 				ip->ip_len = 0;
   8121 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8122 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8123 			} else {
   8124 				struct ip6_hdr *ip6 =
   8125 				    (void *)(mtod(m0, char *) + offset);
   8126 				th = (void *)(mtod(m0, char *) + hlen);
   8127 
   8128 				ip6->ip6_plen = 0;
   8129 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8130 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8131 			}
   8132 			tcp_hlen = th->th_off << 2;
   8133 		}
   8134 		hlen += tcp_hlen;
   8135 		*cmdlenp |= NQTX_CMD_TSE;
   8136 
   8137 		if (v4) {
   8138 			WM_Q_EVCNT_INCR(txq, tso);
   8139 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8140 		} else {
   8141 			WM_Q_EVCNT_INCR(txq, tso6);
   8142 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8143 		}
   8144 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8145 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8146 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8147 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8148 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8149 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8150 	} else {
   8151 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8152 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8153 	}
   8154 
   8155 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8156 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8157 		cmdc |= NQTXC_CMD_IP4;
   8158 	}
   8159 
   8160 	if (m0->m_pkthdr.csum_flags &
   8161 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8162 		WM_Q_EVCNT_INCR(txq, tusum);
   8163 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8164 			cmdc |= NQTXC_CMD_TCP;
   8165 		else
   8166 			cmdc |= NQTXC_CMD_UDP;
   8167 
   8168 		cmdc |= NQTXC_CMD_IP4;
   8169 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8170 	}
   8171 	if (m0->m_pkthdr.csum_flags &
   8172 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8173 		WM_Q_EVCNT_INCR(txq, tusum6);
   8174 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8175 			cmdc |= NQTXC_CMD_TCP;
   8176 		else
   8177 			cmdc |= NQTXC_CMD_UDP;
   8178 
   8179 		cmdc |= NQTXC_CMD_IP6;
   8180 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8181 	}
   8182 
   8183 	/*
   8184 	 * We don't have to write context descriptor for every packet to
   8185 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
   8186 	 * I210 and I211. It is enough to write once per a Tx queue for these
   8187 	 * controllers.
   8188 	 * It would be overhead to write context descriptor for every packet,
   8189 	 * however it does not cause problems.
   8190 	 */
   8191 	/* Fill in the context descriptor. */
   8192 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8193 	    htole32(vl_len);
   8194 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8195 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8196 	    htole32(cmdc);
   8197 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8198 	    htole32(mssidx);
   8199 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8200 	DPRINTF(WM_DEBUG_TX,
   8201 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8202 		txq->txq_next, 0, vl_len));
   8203 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8204 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8205 	txs->txs_ndesc++;
   8206 }
   8207 
   8208 /*
   8209  * wm_nq_start:		[ifnet interface function]
   8210  *
   8211  *	Start packet transmission on the interface for NEWQUEUE devices
   8212  */
   8213 static void
   8214 wm_nq_start(struct ifnet *ifp)
   8215 {
   8216 	struct wm_softc *sc = ifp->if_softc;
   8217 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8218 
   8219 #ifdef WM_MPSAFE
   8220 	KASSERT(if_is_mpsafe(ifp));
   8221 #endif
   8222 	/*
   8223 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8224 	 */
   8225 
   8226 	mutex_enter(txq->txq_lock);
   8227 	if (!txq->txq_stopping)
   8228 		wm_nq_start_locked(ifp);
   8229 	mutex_exit(txq->txq_lock);
   8230 }
   8231 
   8232 static void
   8233 wm_nq_start_locked(struct ifnet *ifp)
   8234 {
   8235 	struct wm_softc *sc = ifp->if_softc;
   8236 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8237 
   8238 	wm_nq_send_common_locked(ifp, txq, false);
   8239 }
   8240 
   8241 static int
   8242 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8243 {
   8244 	int qid;
   8245 	struct wm_softc *sc = ifp->if_softc;
   8246 	struct wm_txqueue *txq;
   8247 
   8248 	qid = wm_select_txqueue(ifp, m);
   8249 	txq = &sc->sc_queue[qid].wmq_txq;
   8250 
   8251 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8252 		m_freem(m);
   8253 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8254 		return ENOBUFS;
   8255 	}
   8256 
   8257 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8258 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8259 	if (m->m_flags & M_MCAST)
   8260 		if_statinc_ref(nsr, if_omcasts);
   8261 	IF_STAT_PUTREF(ifp);
   8262 
   8263 	/*
   8264 	 * The situations which this mutex_tryenter() fails at running time
   8265 	 * are below two patterns.
   8266 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
   8267 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
   8268 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   8269 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   8270 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   8271 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   8272 	 * stuck, either.
   8273 	 */
   8274 	if (mutex_tryenter(txq->txq_lock)) {
   8275 		if (!txq->txq_stopping)
   8276 			wm_nq_transmit_locked(ifp, txq);
   8277 		mutex_exit(txq->txq_lock);
   8278 	}
   8279 
   8280 	return 0;
   8281 }
   8282 
   8283 static void
   8284 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8285 {
   8286 
   8287 	wm_nq_send_common_locked(ifp, txq, true);
   8288 }
   8289 
   8290 static void
   8291 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8292     bool is_transmit)
   8293 {
   8294 	struct wm_softc *sc = ifp->if_softc;
   8295 	struct mbuf *m0;
   8296 	struct wm_txsoft *txs;
   8297 	bus_dmamap_t dmamap;
   8298 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8299 	bool do_csum, sent;
   8300 	bool remap = true;
   8301 
   8302 	KASSERT(mutex_owned(txq->txq_lock));
   8303 
   8304 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8305 		return;
   8306 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8307 		return;
   8308 
   8309 	sent = false;
   8310 
   8311 	/*
   8312 	 * Loop through the send queue, setting up transmit descriptors
   8313 	 * until we drain the queue, or use up all available transmit
   8314 	 * descriptors.
   8315 	 */
   8316 	for (;;) {
   8317 		m0 = NULL;
   8318 
   8319 		/* Get a work queue entry. */
   8320 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8321 			wm_txeof(txq, UINT_MAX);
   8322 			if (txq->txq_sfree == 0) {
   8323 				DPRINTF(WM_DEBUG_TX,
   8324 				    ("%s: TX: no free job descriptors\n",
   8325 					device_xname(sc->sc_dev)));
   8326 				WM_Q_EVCNT_INCR(txq, txsstall);
   8327 				break;
   8328 			}
   8329 		}
   8330 
   8331 		/* Grab a packet off the queue. */
   8332 		if (is_transmit)
   8333 			m0 = pcq_get(txq->txq_interq);
   8334 		else
   8335 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8336 		if (m0 == NULL)
   8337 			break;
   8338 
   8339 		DPRINTF(WM_DEBUG_TX,
   8340 		    ("%s: TX: have packet to transmit: %p\n",
   8341 		    device_xname(sc->sc_dev), m0));
   8342 
   8343 		txs = &txq->txq_soft[txq->txq_snext];
   8344 		dmamap = txs->txs_dmamap;
   8345 
   8346 		/*
   8347 		 * Load the DMA map.  If this fails, the packet either
   8348 		 * didn't fit in the allotted number of segments, or we
   8349 		 * were short on resources.  For the too-many-segments
   8350 		 * case, we simply report an error and drop the packet,
   8351 		 * since we can't sanely copy a jumbo packet to a single
   8352 		 * buffer.
   8353 		 */
   8354 retry:
   8355 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8356 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8357 		if (__predict_false(error)) {
   8358 			if (error == EFBIG) {
   8359 				if (remap == true) {
   8360 					struct mbuf *m;
   8361 
   8362 					remap = false;
   8363 					m = m_defrag(m0, M_NOWAIT);
   8364 					if (m != NULL) {
   8365 						WM_Q_EVCNT_INCR(txq, defrag);
   8366 						m0 = m;
   8367 						goto retry;
   8368 					}
   8369 				}
   8370 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8371 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8372 				    "DMA segments, dropping...\n",
   8373 				    device_xname(sc->sc_dev));
   8374 				wm_dump_mbuf_chain(sc, m0);
   8375 				m_freem(m0);
   8376 				continue;
   8377 			}
   8378 			/* Short on resources, just stop for now. */
   8379 			DPRINTF(WM_DEBUG_TX,
   8380 			    ("%s: TX: dmamap load failed: %d\n",
   8381 				device_xname(sc->sc_dev), error));
   8382 			break;
   8383 		}
   8384 
   8385 		segs_needed = dmamap->dm_nsegs;
   8386 
   8387 		/*
   8388 		 * Ensure we have enough descriptors free to describe
   8389 		 * the packet. Note, we always reserve one descriptor
   8390 		 * at the end of the ring due to the semantics of the
   8391 		 * TDT register, plus one more in the event we need
   8392 		 * to load offload context.
   8393 		 */
   8394 		if (segs_needed > txq->txq_free - 2) {
   8395 			/*
   8396 			 * Not enough free descriptors to transmit this
   8397 			 * packet.  We haven't committed anything yet,
   8398 			 * so just unload the DMA map, put the packet
   8399 			 * pack on the queue, and punt. Notify the upper
   8400 			 * layer that there are no more slots left.
   8401 			 */
   8402 			DPRINTF(WM_DEBUG_TX,
   8403 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8404 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8405 				segs_needed, txq->txq_free - 1));
   8406 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8407 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8408 			WM_Q_EVCNT_INCR(txq, txdstall);
   8409 			break;
   8410 		}
   8411 
   8412 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8413 
   8414 		DPRINTF(WM_DEBUG_TX,
   8415 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8416 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8417 
   8418 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8419 
   8420 		/*
   8421 		 * Store a pointer to the packet so that we can free it
   8422 		 * later.
   8423 		 *
   8424 		 * Initially, we consider the number of descriptors the
   8425 		 * packet uses the number of DMA segments.  This may be
   8426 		 * incremented by 1 if we do checksum offload (a descriptor
   8427 		 * is used to set the checksum context).
   8428 		 */
   8429 		txs->txs_mbuf = m0;
   8430 		txs->txs_firstdesc = txq->txq_next;
   8431 		txs->txs_ndesc = segs_needed;
   8432 
   8433 		/* Set up offload parameters for this packet. */
   8434 		uint32_t cmdlen, fields, dcmdlen;
   8435 		if (m0->m_pkthdr.csum_flags &
   8436 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8437 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8438 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8439 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8440 			    &do_csum);
   8441 		} else {
   8442 			do_csum = false;
   8443 			cmdlen = 0;
   8444 			fields = 0;
   8445 		}
   8446 
   8447 		/* Sync the DMA map. */
   8448 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8449 		    BUS_DMASYNC_PREWRITE);
   8450 
   8451 		/* Initialize the first transmit descriptor. */
   8452 		nexttx = txq->txq_next;
   8453 		if (!do_csum) {
   8454 			/* Setup a legacy descriptor */
   8455 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8456 			    dmamap->dm_segs[0].ds_addr);
   8457 			txq->txq_descs[nexttx].wtx_cmdlen =
   8458 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8459 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8460 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8461 			if (vlan_has_tag(m0)) {
   8462 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8463 				    htole32(WTX_CMD_VLE);
   8464 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8465 				    htole16(vlan_get_tag(m0));
   8466 			} else
   8467 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8468 
   8469 			dcmdlen = 0;
   8470 		} else {
   8471 			/* Setup an advanced data descriptor */
   8472 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8473 			    htole64(dmamap->dm_segs[0].ds_addr);
   8474 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8475 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8476 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8477 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8478 			    htole32(fields);
   8479 			DPRINTF(WM_DEBUG_TX,
   8480 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8481 				device_xname(sc->sc_dev), nexttx,
   8482 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8483 			DPRINTF(WM_DEBUG_TX,
   8484 			    ("\t 0x%08x%08x\n", fields,
   8485 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8486 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8487 		}
   8488 
   8489 		lasttx = nexttx;
   8490 		nexttx = WM_NEXTTX(txq, nexttx);
   8491 		/*
   8492 		 * Fill in the next descriptors. legacy or advanced format
   8493 		 * is the same here
   8494 		 */
   8495 		for (seg = 1; seg < dmamap->dm_nsegs;
   8496 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8497 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8498 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8499 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8500 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8501 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8502 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8503 			lasttx = nexttx;
   8504 
   8505 			DPRINTF(WM_DEBUG_TX,
   8506 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8507 				device_xname(sc->sc_dev), nexttx,
   8508 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8509 				dmamap->dm_segs[seg].ds_len));
   8510 		}
   8511 
   8512 		KASSERT(lasttx != -1);
   8513 
   8514 		/*
   8515 		 * Set up the command byte on the last descriptor of
   8516 		 * the packet. If we're in the interrupt delay window,
   8517 		 * delay the interrupt.
   8518 		 */
   8519 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8520 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8521 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8522 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8523 
   8524 		txs->txs_lastdesc = lasttx;
   8525 
   8526 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8527 		    device_xname(sc->sc_dev),
   8528 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8529 
   8530 		/* Sync the descriptors we're using. */
   8531 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8532 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8533 
   8534 		/* Give the packet to the chip. */
   8535 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8536 		sent = true;
   8537 
   8538 		DPRINTF(WM_DEBUG_TX,
   8539 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8540 
   8541 		DPRINTF(WM_DEBUG_TX,
   8542 		    ("%s: TX: finished transmitting packet, job %d\n",
   8543 			device_xname(sc->sc_dev), txq->txq_snext));
   8544 
   8545 		/* Advance the tx pointer. */
   8546 		txq->txq_free -= txs->txs_ndesc;
   8547 		txq->txq_next = nexttx;
   8548 
   8549 		txq->txq_sfree--;
   8550 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8551 
   8552 		/* Pass the packet to any BPF listeners. */
   8553 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8554 	}
   8555 
   8556 	if (m0 != NULL) {
   8557 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8558 		WM_Q_EVCNT_INCR(txq, descdrop);
   8559 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8560 			__func__));
   8561 		m_freem(m0);
   8562 	}
   8563 
   8564 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8565 		/* No more slots; notify upper layer. */
   8566 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8567 	}
   8568 
   8569 	if (sent) {
   8570 		/* Set a watchdog timer in case the chip flakes out. */
   8571 		txq->txq_lastsent = time_uptime;
   8572 		txq->txq_sending = true;
   8573 	}
   8574 }
   8575 
   8576 static void
   8577 wm_deferred_start_locked(struct wm_txqueue *txq)
   8578 {
   8579 	struct wm_softc *sc = txq->txq_sc;
   8580 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8581 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8582 	int qid = wmq->wmq_id;
   8583 
   8584 	KASSERT(mutex_owned(txq->txq_lock));
   8585 
   8586 	if (txq->txq_stopping) {
   8587 		mutex_exit(txq->txq_lock);
   8588 		return;
   8589 	}
   8590 
   8591 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8592 		/* XXX need for ALTQ or one CPU system */
   8593 		if (qid == 0)
   8594 			wm_nq_start_locked(ifp);
   8595 		wm_nq_transmit_locked(ifp, txq);
   8596 	} else {
   8597 		/* XXX need for ALTQ or one CPU system */
   8598 		if (qid == 0)
   8599 			wm_start_locked(ifp);
   8600 		wm_transmit_locked(ifp, txq);
   8601 	}
   8602 }
   8603 
   8604 /* Interrupt */
   8605 
   8606 /*
   8607  * wm_txeof:
   8608  *
   8609  *	Helper; handle transmit interrupts.
   8610  */
   8611 static bool
   8612 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8613 {
   8614 	struct wm_softc *sc = txq->txq_sc;
   8615 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8616 	struct wm_txsoft *txs;
   8617 	int count = 0;
   8618 	int i;
   8619 	uint8_t status;
   8620 	bool more = false;
   8621 
   8622 	KASSERT(mutex_owned(txq->txq_lock));
   8623 
   8624 	if (txq->txq_stopping)
   8625 		return false;
   8626 
   8627 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8628 
   8629 	/*
   8630 	 * Go through the Tx list and free mbufs for those
   8631 	 * frames which have been transmitted.
   8632 	 */
   8633 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8634 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8635 		if (limit-- == 0) {
   8636 			more = true;
   8637 			DPRINTF(WM_DEBUG_TX,
   8638 			    ("%s: TX: loop limited, job %d is not processed\n",
   8639 				device_xname(sc->sc_dev), i));
   8640 			break;
   8641 		}
   8642 
   8643 		txs = &txq->txq_soft[i];
   8644 
   8645 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8646 			device_xname(sc->sc_dev), i));
   8647 
   8648 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8649 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8650 
   8651 		status =
   8652 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8653 		if ((status & WTX_ST_DD) == 0) {
   8654 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8655 			    BUS_DMASYNC_PREREAD);
   8656 			break;
   8657 		}
   8658 
   8659 		count++;
   8660 		DPRINTF(WM_DEBUG_TX,
   8661 		    ("%s: TX: job %d done: descs %d..%d\n",
   8662 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8663 		    txs->txs_lastdesc));
   8664 
   8665 		/*
   8666 		 * XXX We should probably be using the statistics
   8667 		 * XXX registers, but I don't know if they exist
   8668 		 * XXX on chips before the i82544.
   8669 		 */
   8670 
   8671 #ifdef WM_EVENT_COUNTERS
   8672 		if (status & WTX_ST_TU)
   8673 			WM_Q_EVCNT_INCR(txq, underrun);
   8674 #endif /* WM_EVENT_COUNTERS */
   8675 
   8676 		/*
   8677 		 * 82574 and newer's document says the status field has neither
   8678 		 * EC (Excessive Collision) bit nor LC (Late Collision) bit
   8679 		 * (reserved). Refer "PCIe GbE Controller Open Source Software
   8680 		 * Developer's Manual", 82574 datasheet and newer.
   8681 		 *
   8682 		 * XXX I saw the LC bit was set on I218 even though the media
   8683 		 * was full duplex, so the bit might be used for other
   8684 		 * meaning ...(I have no document).
   8685 		 */
   8686 
   8687 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8688 		    && ((sc->sc_type < WM_T_82574)
   8689 			|| (sc->sc_type == WM_T_80003))) {
   8690 			if_statinc(ifp, if_oerrors);
   8691 			if (status & WTX_ST_LC)
   8692 				log(LOG_WARNING, "%s: late collision\n",
   8693 				    device_xname(sc->sc_dev));
   8694 			else if (status & WTX_ST_EC) {
   8695 				if_statadd(ifp, if_collisions,
   8696 				    TX_COLLISION_THRESHOLD + 1);
   8697 				log(LOG_WARNING, "%s: excessive collisions\n",
   8698 				    device_xname(sc->sc_dev));
   8699 			}
   8700 		} else
   8701 			if_statinc(ifp, if_opackets);
   8702 
   8703 		txq->txq_packets++;
   8704 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8705 
   8706 		txq->txq_free += txs->txs_ndesc;
   8707 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8708 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8709 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8710 		m_freem(txs->txs_mbuf);
   8711 		txs->txs_mbuf = NULL;
   8712 	}
   8713 
   8714 	/* Update the dirty transmit buffer pointer. */
   8715 	txq->txq_sdirty = i;
   8716 	DPRINTF(WM_DEBUG_TX,
   8717 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8718 
   8719 	/*
   8720 	 * If there are no more pending transmissions, cancel the watchdog
   8721 	 * timer.
   8722 	 */
   8723 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8724 		txq->txq_sending = false;
   8725 
   8726 	return more;
   8727 }
   8728 
   8729 static inline uint32_t
   8730 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8731 {
   8732 	struct wm_softc *sc = rxq->rxq_sc;
   8733 
   8734 	if (sc->sc_type == WM_T_82574)
   8735 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8736 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8737 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8738 	else
   8739 		return rxq->rxq_descs[idx].wrx_status;
   8740 }
   8741 
   8742 static inline uint32_t
   8743 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8744 {
   8745 	struct wm_softc *sc = rxq->rxq_sc;
   8746 
   8747 	if (sc->sc_type == WM_T_82574)
   8748 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8749 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8750 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8751 	else
   8752 		return rxq->rxq_descs[idx].wrx_errors;
   8753 }
   8754 
   8755 static inline uint16_t
   8756 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8757 {
   8758 	struct wm_softc *sc = rxq->rxq_sc;
   8759 
   8760 	if (sc->sc_type == WM_T_82574)
   8761 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8762 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8763 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8764 	else
   8765 		return rxq->rxq_descs[idx].wrx_special;
   8766 }
   8767 
   8768 static inline int
   8769 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8770 {
   8771 	struct wm_softc *sc = rxq->rxq_sc;
   8772 
   8773 	if (sc->sc_type == WM_T_82574)
   8774 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8775 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8776 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8777 	else
   8778 		return rxq->rxq_descs[idx].wrx_len;
   8779 }
   8780 
   8781 #ifdef WM_DEBUG
   8782 static inline uint32_t
   8783 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8784 {
   8785 	struct wm_softc *sc = rxq->rxq_sc;
   8786 
   8787 	if (sc->sc_type == WM_T_82574)
   8788 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8789 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8790 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8791 	else
   8792 		return 0;
   8793 }
   8794 
   8795 static inline uint8_t
   8796 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8797 {
   8798 	struct wm_softc *sc = rxq->rxq_sc;
   8799 
   8800 	if (sc->sc_type == WM_T_82574)
   8801 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8802 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8803 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8804 	else
   8805 		return 0;
   8806 }
   8807 #endif /* WM_DEBUG */
   8808 
   8809 static inline bool
   8810 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8811     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8812 {
   8813 
   8814 	if (sc->sc_type == WM_T_82574)
   8815 		return (status & ext_bit) != 0;
   8816 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8817 		return (status & nq_bit) != 0;
   8818 	else
   8819 		return (status & legacy_bit) != 0;
   8820 }
   8821 
   8822 static inline bool
   8823 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8824     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8825 {
   8826 
   8827 	if (sc->sc_type == WM_T_82574)
   8828 		return (error & ext_bit) != 0;
   8829 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8830 		return (error & nq_bit) != 0;
   8831 	else
   8832 		return (error & legacy_bit) != 0;
   8833 }
   8834 
   8835 static inline bool
   8836 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8837 {
   8838 
   8839 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8840 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8841 		return true;
   8842 	else
   8843 		return false;
   8844 }
   8845 
   8846 static inline bool
   8847 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8848 {
   8849 	struct wm_softc *sc = rxq->rxq_sc;
   8850 
   8851 	/* XXX missing error bit for newqueue? */
   8852 	if (wm_rxdesc_is_set_error(sc, errors,
   8853 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8854 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8855 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8856 		NQRXC_ERROR_RXE)) {
   8857 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8858 		    EXTRXC_ERROR_SE, 0))
   8859 			log(LOG_WARNING, "%s: symbol error\n",
   8860 			    device_xname(sc->sc_dev));
   8861 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8862 		    EXTRXC_ERROR_SEQ, 0))
   8863 			log(LOG_WARNING, "%s: receive sequence error\n",
   8864 			    device_xname(sc->sc_dev));
   8865 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8866 		    EXTRXC_ERROR_CE, 0))
   8867 			log(LOG_WARNING, "%s: CRC error\n",
   8868 			    device_xname(sc->sc_dev));
   8869 		return true;
   8870 	}
   8871 
   8872 	return false;
   8873 }
   8874 
   8875 static inline bool
   8876 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8877 {
   8878 	struct wm_softc *sc = rxq->rxq_sc;
   8879 
   8880 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8881 		NQRXC_STATUS_DD)) {
   8882 		/* We have processed all of the receive descriptors. */
   8883 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8884 		return false;
   8885 	}
   8886 
   8887 	return true;
   8888 }
   8889 
   8890 static inline bool
   8891 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8892     uint16_t vlantag, struct mbuf *m)
   8893 {
   8894 
   8895 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8896 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8897 		vlan_set_tag(m, le16toh(vlantag));
   8898 	}
   8899 
   8900 	return true;
   8901 }
   8902 
   8903 static inline void
   8904 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8905     uint32_t errors, struct mbuf *m)
   8906 {
   8907 	struct wm_softc *sc = rxq->rxq_sc;
   8908 
   8909 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8910 		if (wm_rxdesc_is_set_status(sc, status,
   8911 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8912 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8913 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8914 			if (wm_rxdesc_is_set_error(sc, errors,
   8915 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8916 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8917 		}
   8918 		if (wm_rxdesc_is_set_status(sc, status,
   8919 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8920 			/*
   8921 			 * Note: we don't know if this was TCP or UDP,
   8922 			 * so we just set both bits, and expect the
   8923 			 * upper layers to deal.
   8924 			 */
   8925 			WM_Q_EVCNT_INCR(rxq, tusum);
   8926 			m->m_pkthdr.csum_flags |=
   8927 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8928 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8929 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8930 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8931 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8932 		}
   8933 	}
   8934 }
   8935 
   8936 /*
   8937  * wm_rxeof:
   8938  *
   8939  *	Helper; handle receive interrupts.
   8940  */
   8941 static bool
   8942 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8943 {
   8944 	struct wm_softc *sc = rxq->rxq_sc;
   8945 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8946 	struct wm_rxsoft *rxs;
   8947 	struct mbuf *m;
   8948 	int i, len;
   8949 	int count = 0;
   8950 	uint32_t status, errors;
   8951 	uint16_t vlantag;
   8952 	bool more = false;
   8953 
   8954 	KASSERT(mutex_owned(rxq->rxq_lock));
   8955 
   8956 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8957 		if (limit-- == 0) {
   8958 			rxq->rxq_ptr = i;
   8959 			more = true;
   8960 			DPRINTF(WM_DEBUG_RX,
   8961 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8962 				device_xname(sc->sc_dev), i));
   8963 			break;
   8964 		}
   8965 
   8966 		rxs = &rxq->rxq_soft[i];
   8967 
   8968 		DPRINTF(WM_DEBUG_RX,
   8969 		    ("%s: RX: checking descriptor %d\n",
   8970 			device_xname(sc->sc_dev), i));
   8971 		wm_cdrxsync(rxq, i,
   8972 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8973 
   8974 		status = wm_rxdesc_get_status(rxq, i);
   8975 		errors = wm_rxdesc_get_errors(rxq, i);
   8976 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8977 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8978 #ifdef WM_DEBUG
   8979 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8980 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8981 #endif
   8982 
   8983 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8984 			/*
   8985 			 * Update the receive pointer holding rxq_lock
   8986 			 * consistent with increment counter.
   8987 			 */
   8988 			rxq->rxq_ptr = i;
   8989 			break;
   8990 		}
   8991 
   8992 		count++;
   8993 		if (__predict_false(rxq->rxq_discard)) {
   8994 			DPRINTF(WM_DEBUG_RX,
   8995 			    ("%s: RX: discarding contents of descriptor %d\n",
   8996 				device_xname(sc->sc_dev), i));
   8997 			wm_init_rxdesc(rxq, i);
   8998 			if (wm_rxdesc_is_eop(rxq, status)) {
   8999 				/* Reset our state. */
   9000 				DPRINTF(WM_DEBUG_RX,
   9001 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9002 					device_xname(sc->sc_dev)));
   9003 				rxq->rxq_discard = 0;
   9004 			}
   9005 			continue;
   9006 		}
   9007 
   9008 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9009 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9010 
   9011 		m = rxs->rxs_mbuf;
   9012 
   9013 		/*
   9014 		 * Add a new receive buffer to the ring, unless of
   9015 		 * course the length is zero. Treat the latter as a
   9016 		 * failed mapping.
   9017 		 */
   9018 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9019 			/*
   9020 			 * Failed, throw away what we've done so
   9021 			 * far, and discard the rest of the packet.
   9022 			 */
   9023 			if_statinc(ifp, if_ierrors);
   9024 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9025 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9026 			wm_init_rxdesc(rxq, i);
   9027 			if (!wm_rxdesc_is_eop(rxq, status))
   9028 				rxq->rxq_discard = 1;
   9029 			if (rxq->rxq_head != NULL)
   9030 				m_freem(rxq->rxq_head);
   9031 			WM_RXCHAIN_RESET(rxq);
   9032 			DPRINTF(WM_DEBUG_RX,
   9033 			    ("%s: RX: Rx buffer allocation failed, "
   9034 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9035 				rxq->rxq_discard ? " (discard)" : ""));
   9036 			continue;
   9037 		}
   9038 
   9039 		m->m_len = len;
   9040 		rxq->rxq_len += len;
   9041 		DPRINTF(WM_DEBUG_RX,
   9042 		    ("%s: RX: buffer at %p len %d\n",
   9043 			device_xname(sc->sc_dev), m->m_data, len));
   9044 
   9045 		/* If this is not the end of the packet, keep looking. */
   9046 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9047 			WM_RXCHAIN_LINK(rxq, m);
   9048 			DPRINTF(WM_DEBUG_RX,
   9049 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9050 				device_xname(sc->sc_dev), rxq->rxq_len));
   9051 			continue;
   9052 		}
   9053 
   9054 		/*
   9055 		 * Okay, we have the entire packet now. The chip is
   9056 		 * configured to include the FCS except I350 and I21[01]
   9057 		 * (not all chips can be configured to strip it),
   9058 		 * so we need to trim it.
   9059 		 * May need to adjust length of previous mbuf in the
   9060 		 * chain if the current mbuf is too short.
   9061 		 * For an eratta, the RCTL_SECRC bit in RCTL register
   9062 		 * is always set in I350, so we don't trim it.
   9063 		 */
   9064 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   9065 		    && (sc->sc_type != WM_T_I210)
   9066 		    && (sc->sc_type != WM_T_I211)) {
   9067 			if (m->m_len < ETHER_CRC_LEN) {
   9068 				rxq->rxq_tail->m_len
   9069 				    -= (ETHER_CRC_LEN - m->m_len);
   9070 				m->m_len = 0;
   9071 			} else
   9072 				m->m_len -= ETHER_CRC_LEN;
   9073 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9074 		} else
   9075 			len = rxq->rxq_len;
   9076 
   9077 		WM_RXCHAIN_LINK(rxq, m);
   9078 
   9079 		*rxq->rxq_tailp = NULL;
   9080 		m = rxq->rxq_head;
   9081 
   9082 		WM_RXCHAIN_RESET(rxq);
   9083 
   9084 		DPRINTF(WM_DEBUG_RX,
   9085 		    ("%s: RX: have entire packet, len -> %d\n",
   9086 			device_xname(sc->sc_dev), len));
   9087 
   9088 		/* If an error occurred, update stats and drop the packet. */
   9089 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9090 			m_freem(m);
   9091 			continue;
   9092 		}
   9093 
   9094 		/* No errors.  Receive the packet. */
   9095 		m_set_rcvif(m, ifp);
   9096 		m->m_pkthdr.len = len;
   9097 		/*
   9098 		 * TODO
   9099 		 * should be save rsshash and rsstype to this mbuf.
   9100 		 */
   9101 		DPRINTF(WM_DEBUG_RX,
   9102 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9103 			device_xname(sc->sc_dev), rsstype, rsshash));
   9104 
   9105 		/*
   9106 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9107 		 * for us.  Associate the tag with the packet.
   9108 		 */
   9109 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9110 			continue;
   9111 
   9112 		/* Set up checksum info for this packet. */
   9113 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9114 		/*
   9115 		 * Update the receive pointer holding rxq_lock consistent with
   9116 		 * increment counter.
   9117 		 */
   9118 		rxq->rxq_ptr = i;
   9119 		rxq->rxq_packets++;
   9120 		rxq->rxq_bytes += len;
   9121 		mutex_exit(rxq->rxq_lock);
   9122 
   9123 		/* Pass it on. */
   9124 		if_percpuq_enqueue(sc->sc_ipq, m);
   9125 
   9126 		mutex_enter(rxq->rxq_lock);
   9127 
   9128 		if (rxq->rxq_stopping)
   9129 			break;
   9130 	}
   9131 
   9132 	DPRINTF(WM_DEBUG_RX,
   9133 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9134 
   9135 	return more;
   9136 }
   9137 
   9138 /*
   9139  * wm_linkintr_gmii:
   9140  *
   9141  *	Helper; handle link interrupts for GMII.
   9142  */
   9143 static void
   9144 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9145 {
   9146 	device_t dev = sc->sc_dev;
   9147 	uint32_t status, reg;
   9148 	bool link;
   9149 	int rv;
   9150 
   9151 	KASSERT(WM_CORE_LOCKED(sc));
   9152 
   9153 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9154 		__func__));
   9155 
   9156 	if ((icr & ICR_LSC) == 0) {
   9157 		if (icr & ICR_RXSEQ)
   9158 			DPRINTF(WM_DEBUG_LINK,
   9159 			    ("%s: LINK Receive sequence error\n",
   9160 				device_xname(dev)));
   9161 		return;
   9162 	}
   9163 
   9164 	/* Link status changed */
   9165 	status = CSR_READ(sc, WMREG_STATUS);
   9166 	link = status & STATUS_LU;
   9167 	if (link) {
   9168 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9169 			device_xname(dev),
   9170 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9171 	} else {
   9172 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9173 			device_xname(dev)));
   9174 	}
   9175 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9176 		wm_gig_downshift_workaround_ich8lan(sc);
   9177 
   9178 	if ((sc->sc_type == WM_T_ICH8)
   9179 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9180 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9181 	}
   9182 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9183 		device_xname(dev)));
   9184 	mii_pollstat(&sc->sc_mii);
   9185 	if (sc->sc_type == WM_T_82543) {
   9186 		int miistatus, active;
   9187 
   9188 		/*
   9189 		 * With 82543, we need to force speed and
   9190 		 * duplex on the MAC equal to what the PHY
   9191 		 * speed and duplex configuration is.
   9192 		 */
   9193 		miistatus = sc->sc_mii.mii_media_status;
   9194 
   9195 		if (miistatus & IFM_ACTIVE) {
   9196 			active = sc->sc_mii.mii_media_active;
   9197 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9198 			switch (IFM_SUBTYPE(active)) {
   9199 			case IFM_10_T:
   9200 				sc->sc_ctrl |= CTRL_SPEED_10;
   9201 				break;
   9202 			case IFM_100_TX:
   9203 				sc->sc_ctrl |= CTRL_SPEED_100;
   9204 				break;
   9205 			case IFM_1000_T:
   9206 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9207 				break;
   9208 			default:
   9209 				/*
   9210 				 * Fiber?
   9211 				 * Shoud not enter here.
   9212 				 */
   9213 				device_printf(dev, "unknown media (%x)\n",
   9214 				    active);
   9215 				break;
   9216 			}
   9217 			if (active & IFM_FDX)
   9218 				sc->sc_ctrl |= CTRL_FD;
   9219 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9220 		}
   9221 	} else if (sc->sc_type == WM_T_PCH) {
   9222 		wm_k1_gig_workaround_hv(sc,
   9223 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9224 	}
   9225 
   9226 	/*
   9227 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9228 	 * aggressive resulting in many collisions. To avoid this, increase
   9229 	 * the IPG and reduce Rx latency in the PHY.
   9230 	 */
   9231 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9232 	    && link) {
   9233 		uint32_t tipg_reg;
   9234 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9235 		bool fdx;
   9236 		uint16_t emi_addr, emi_val;
   9237 
   9238 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9239 		tipg_reg &= ~TIPG_IPGT_MASK;
   9240 		fdx = status & STATUS_FD;
   9241 
   9242 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9243 			tipg_reg |= 0xff;
   9244 			/* Reduce Rx latency in analog PHY */
   9245 			emi_val = 0;
   9246 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9247 		    fdx && speed != STATUS_SPEED_1000) {
   9248 			tipg_reg |= 0xc;
   9249 			emi_val = 1;
   9250 		} else {
   9251 			/* Roll back the default values */
   9252 			tipg_reg |= 0x08;
   9253 			emi_val = 1;
   9254 		}
   9255 
   9256 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9257 
   9258 		rv = sc->phy.acquire(sc);
   9259 		if (rv)
   9260 			return;
   9261 
   9262 		if (sc->sc_type == WM_T_PCH2)
   9263 			emi_addr = I82579_RX_CONFIG;
   9264 		else
   9265 			emi_addr = I217_RX_CONFIG;
   9266 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9267 
   9268 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9269 			uint16_t phy_reg;
   9270 
   9271 			sc->phy.readreg_locked(dev, 2,
   9272 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9273 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9274 			if (speed == STATUS_SPEED_100
   9275 			    || speed == STATUS_SPEED_10)
   9276 				phy_reg |= 0x3e8;
   9277 			else
   9278 				phy_reg |= 0xfa;
   9279 			sc->phy.writereg_locked(dev, 2,
   9280 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9281 
   9282 			if (speed == STATUS_SPEED_1000) {
   9283 				sc->phy.readreg_locked(dev, 2,
   9284 				    HV_PM_CTRL, &phy_reg);
   9285 
   9286 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9287 
   9288 				sc->phy.writereg_locked(dev, 2,
   9289 				    HV_PM_CTRL, phy_reg);
   9290 			}
   9291 		}
   9292 		sc->phy.release(sc);
   9293 
   9294 		if (rv)
   9295 			return;
   9296 
   9297 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9298 			uint16_t data, ptr_gap;
   9299 
   9300 			if (speed == STATUS_SPEED_1000) {
   9301 				rv = sc->phy.acquire(sc);
   9302 				if (rv)
   9303 					return;
   9304 
   9305 				rv = sc->phy.readreg_locked(dev, 2,
   9306 				    I219_UNKNOWN1, &data);
   9307 				if (rv) {
   9308 					sc->phy.release(sc);
   9309 					return;
   9310 				}
   9311 
   9312 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9313 				if (ptr_gap < 0x18) {
   9314 					data &= ~(0x3ff << 2);
   9315 					data |= (0x18 << 2);
   9316 					rv = sc->phy.writereg_locked(dev,
   9317 					    2, I219_UNKNOWN1, data);
   9318 				}
   9319 				sc->phy.release(sc);
   9320 				if (rv)
   9321 					return;
   9322 			} else {
   9323 				rv = sc->phy.acquire(sc);
   9324 				if (rv)
   9325 					return;
   9326 
   9327 				rv = sc->phy.writereg_locked(dev, 2,
   9328 				    I219_UNKNOWN1, 0xc023);
   9329 				sc->phy.release(sc);
   9330 				if (rv)
   9331 					return;
   9332 
   9333 			}
   9334 		}
   9335 	}
   9336 
   9337 	/*
   9338 	 * I217 Packet Loss issue:
   9339 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9340 	 * on power up.
   9341 	 * Set the Beacon Duration for I217 to 8 usec
   9342 	 */
   9343 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9344 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9345 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9346 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9347 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9348 	}
   9349 
   9350 	/* Work-around I218 hang issue */
   9351 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9352 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9353 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9354 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9355 		wm_k1_workaround_lpt_lp(sc, link);
   9356 
   9357 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9358 		/*
   9359 		 * Set platform power management values for Latency
   9360 		 * Tolerance Reporting (LTR)
   9361 		 */
   9362 		wm_platform_pm_pch_lpt(sc,
   9363 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9364 	}
   9365 
   9366 	/* Clear link partner's EEE ability */
   9367 	sc->eee_lp_ability = 0;
   9368 
   9369 	/* FEXTNVM6 K1-off workaround */
   9370 	if (sc->sc_type == WM_T_PCH_SPT) {
   9371 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9372 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9373 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9374 		else
   9375 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9376 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9377 	}
   9378 
   9379 	if (!link)
   9380 		return;
   9381 
   9382 	switch (sc->sc_type) {
   9383 	case WM_T_PCH2:
   9384 		wm_k1_workaround_lv(sc);
   9385 		/* FALLTHROUGH */
   9386 	case WM_T_PCH:
   9387 		if (sc->sc_phytype == WMPHY_82578)
   9388 			wm_link_stall_workaround_hv(sc);
   9389 		break;
   9390 	default:
   9391 		break;
   9392 	}
   9393 
   9394 	/* Enable/Disable EEE after link up */
   9395 	if (sc->sc_phytype > WMPHY_82579)
   9396 		wm_set_eee_pchlan(sc);
   9397 }
   9398 
   9399 /*
   9400  * wm_linkintr_tbi:
   9401  *
   9402  *	Helper; handle link interrupts for TBI mode.
   9403  */
   9404 static void
   9405 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9406 {
   9407 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9408 	uint32_t status;
   9409 
   9410 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9411 		__func__));
   9412 
   9413 	status = CSR_READ(sc, WMREG_STATUS);
   9414 	if (icr & ICR_LSC) {
   9415 		wm_check_for_link(sc);
   9416 		if (status & STATUS_LU) {
   9417 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9418 				device_xname(sc->sc_dev),
   9419 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9420 			/*
   9421 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9422 			 * so we should update sc->sc_ctrl
   9423 			 */
   9424 
   9425 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9426 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9427 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9428 			if (status & STATUS_FD)
   9429 				sc->sc_tctl |=
   9430 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9431 			else
   9432 				sc->sc_tctl |=
   9433 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9434 			if (sc->sc_ctrl & CTRL_TFCE)
   9435 				sc->sc_fcrtl |= FCRTL_XONE;
   9436 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9437 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9438 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9439 			sc->sc_tbi_linkup = 1;
   9440 			if_link_state_change(ifp, LINK_STATE_UP);
   9441 		} else {
   9442 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9443 				device_xname(sc->sc_dev)));
   9444 			sc->sc_tbi_linkup = 0;
   9445 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9446 		}
   9447 		/* Update LED */
   9448 		wm_tbi_serdes_set_linkled(sc);
   9449 	} else if (icr & ICR_RXSEQ)
   9450 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9451 			device_xname(sc->sc_dev)));
   9452 }
   9453 
   9454 /*
   9455  * wm_linkintr_serdes:
   9456  *
   9457  *	Helper; handle link interrupts for TBI mode.
   9458  */
   9459 static void
   9460 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9461 {
   9462 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9463 	struct mii_data *mii = &sc->sc_mii;
   9464 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9465 	uint32_t pcs_adv, pcs_lpab, reg;
   9466 
   9467 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9468 		__func__));
   9469 
   9470 	if (icr & ICR_LSC) {
   9471 		/* Check PCS */
   9472 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9473 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9474 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9475 				device_xname(sc->sc_dev)));
   9476 			mii->mii_media_status |= IFM_ACTIVE;
   9477 			sc->sc_tbi_linkup = 1;
   9478 			if_link_state_change(ifp, LINK_STATE_UP);
   9479 		} else {
   9480 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9481 				device_xname(sc->sc_dev)));
   9482 			mii->mii_media_status |= IFM_NONE;
   9483 			sc->sc_tbi_linkup = 0;
   9484 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9485 			wm_tbi_serdes_set_linkled(sc);
   9486 			return;
   9487 		}
   9488 		mii->mii_media_active |= IFM_1000_SX;
   9489 		if ((reg & PCS_LSTS_FDX) != 0)
   9490 			mii->mii_media_active |= IFM_FDX;
   9491 		else
   9492 			mii->mii_media_active |= IFM_HDX;
   9493 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9494 			/* Check flow */
   9495 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9496 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9497 				DPRINTF(WM_DEBUG_LINK,
   9498 				    ("XXX LINKOK but not ACOMP\n"));
   9499 				return;
   9500 			}
   9501 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9502 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9503 			DPRINTF(WM_DEBUG_LINK,
   9504 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9505 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9506 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9507 				mii->mii_media_active |= IFM_FLOW
   9508 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9509 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9510 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9511 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9512 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9513 				mii->mii_media_active |= IFM_FLOW
   9514 				    | IFM_ETH_TXPAUSE;
   9515 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9516 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9517 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9518 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9519 				mii->mii_media_active |= IFM_FLOW
   9520 				    | IFM_ETH_RXPAUSE;
   9521 		}
   9522 		/* Update LED */
   9523 		wm_tbi_serdes_set_linkled(sc);
   9524 	} else
   9525 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9526 		    device_xname(sc->sc_dev)));
   9527 }
   9528 
   9529 /*
   9530  * wm_linkintr:
   9531  *
   9532  *	Helper; handle link interrupts.
   9533  */
   9534 static void
   9535 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9536 {
   9537 
   9538 	KASSERT(WM_CORE_LOCKED(sc));
   9539 
   9540 	if (sc->sc_flags & WM_F_HAS_MII)
   9541 		wm_linkintr_gmii(sc, icr);
   9542 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9543 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9544 		wm_linkintr_serdes(sc, icr);
   9545 	else
   9546 		wm_linkintr_tbi(sc, icr);
   9547 }
   9548 
   9549 
   9550 static inline void
   9551 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9552 {
   9553 
   9554 	if (wmq->wmq_txrx_use_workqueue)
   9555 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9556 	else
   9557 		softint_schedule(wmq->wmq_si);
   9558 }
   9559 
   9560 /*
   9561  * wm_intr_legacy:
   9562  *
   9563  *	Interrupt service routine for INTx and MSI.
   9564  */
   9565 static int
   9566 wm_intr_legacy(void *arg)
   9567 {
   9568 	struct wm_softc *sc = arg;
   9569 	struct wm_queue *wmq = &sc->sc_queue[0];
   9570 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9571 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9572 	uint32_t icr, rndval = 0;
   9573 	int handled = 0;
   9574 
   9575 	while (1 /* CONSTCOND */) {
   9576 		icr = CSR_READ(sc, WMREG_ICR);
   9577 		if ((icr & sc->sc_icr) == 0)
   9578 			break;
   9579 		if (handled == 0)
   9580 			DPRINTF(WM_DEBUG_TX,
   9581 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9582 		if (rndval == 0)
   9583 			rndval = icr;
   9584 
   9585 		mutex_enter(rxq->rxq_lock);
   9586 
   9587 		if (rxq->rxq_stopping) {
   9588 			mutex_exit(rxq->rxq_lock);
   9589 			break;
   9590 		}
   9591 
   9592 		handled = 1;
   9593 
   9594 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9595 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9596 			DPRINTF(WM_DEBUG_RX,
   9597 			    ("%s: RX: got Rx intr 0x%08x\n",
   9598 				device_xname(sc->sc_dev),
   9599 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9600 			WM_Q_EVCNT_INCR(rxq, intr);
   9601 		}
   9602 #endif
   9603 		/*
   9604 		 * wm_rxeof() does *not* call upper layer functions directly,
   9605 		 * as if_percpuq_enqueue() just call softint_schedule().
   9606 		 * So, we can call wm_rxeof() in interrupt context.
   9607 		 */
   9608 		wm_rxeof(rxq, UINT_MAX);
   9609 		/* Fill lower bits with RX index. See below for the upper. */
   9610 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9611 
   9612 		mutex_exit(rxq->rxq_lock);
   9613 		mutex_enter(txq->txq_lock);
   9614 
   9615 		if (txq->txq_stopping) {
   9616 			mutex_exit(txq->txq_lock);
   9617 			break;
   9618 		}
   9619 
   9620 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9621 		if (icr & ICR_TXDW) {
   9622 			DPRINTF(WM_DEBUG_TX,
   9623 			    ("%s: TX: got TXDW interrupt\n",
   9624 				device_xname(sc->sc_dev)));
   9625 			WM_Q_EVCNT_INCR(txq, txdw);
   9626 		}
   9627 #endif
   9628 		wm_txeof(txq, UINT_MAX);
   9629 		/* Fill upper bits with TX index. See above for the lower. */
   9630 		rndval = txq->txq_next * WM_NRXDESC;
   9631 
   9632 		mutex_exit(txq->txq_lock);
   9633 		WM_CORE_LOCK(sc);
   9634 
   9635 		if (sc->sc_core_stopping) {
   9636 			WM_CORE_UNLOCK(sc);
   9637 			break;
   9638 		}
   9639 
   9640 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9641 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9642 			wm_linkintr(sc, icr);
   9643 		}
   9644 		if ((icr & ICR_GPI(0)) != 0)
   9645 			device_printf(sc->sc_dev, "got module interrupt\n");
   9646 
   9647 		WM_CORE_UNLOCK(sc);
   9648 
   9649 		if (icr & ICR_RXO) {
   9650 #if defined(WM_DEBUG)
   9651 			log(LOG_WARNING, "%s: Receive overrun\n",
   9652 			    device_xname(sc->sc_dev));
   9653 #endif /* defined(WM_DEBUG) */
   9654 		}
   9655 	}
   9656 
   9657 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9658 
   9659 	if (handled) {
   9660 		/* Try to get more packets going. */
   9661 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9662 		wm_sched_handle_queue(sc, wmq);
   9663 	}
   9664 
   9665 	return handled;
   9666 }
   9667 
   9668 static inline void
   9669 wm_txrxintr_disable(struct wm_queue *wmq)
   9670 {
   9671 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9672 
   9673 	if (sc->sc_type == WM_T_82574)
   9674 		CSR_WRITE(sc, WMREG_IMC,
   9675 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9676 	else if (sc->sc_type == WM_T_82575)
   9677 		CSR_WRITE(sc, WMREG_EIMC,
   9678 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9679 	else
   9680 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9681 }
   9682 
   9683 static inline void
   9684 wm_txrxintr_enable(struct wm_queue *wmq)
   9685 {
   9686 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9687 
   9688 	wm_itrs_calculate(sc, wmq);
   9689 
   9690 	/*
   9691 	 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
   9692 	 * There is no need to care about which of RXQ(0) and RXQ(1) enable
   9693 	 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
   9694 	 * while each wm_handle_queue(wmq) is runnig.
   9695 	 */
   9696 	if (sc->sc_type == WM_T_82574)
   9697 		CSR_WRITE(sc, WMREG_IMS,
   9698 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9699 	else if (sc->sc_type == WM_T_82575)
   9700 		CSR_WRITE(sc, WMREG_EIMS,
   9701 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9702 	else
   9703 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9704 }
   9705 
   9706 static int
   9707 wm_txrxintr_msix(void *arg)
   9708 {
   9709 	struct wm_queue *wmq = arg;
   9710 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9711 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9712 	struct wm_softc *sc = txq->txq_sc;
   9713 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9714 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9715 	uint32_t rndval = 0;
   9716 	bool txmore;
   9717 	bool rxmore;
   9718 
   9719 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9720 
   9721 	DPRINTF(WM_DEBUG_TX,
   9722 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9723 
   9724 	wm_txrxintr_disable(wmq);
   9725 
   9726 	mutex_enter(txq->txq_lock);
   9727 
   9728 	if (txq->txq_stopping) {
   9729 		mutex_exit(txq->txq_lock);
   9730 		return 0;
   9731 	}
   9732 
   9733 	WM_Q_EVCNT_INCR(txq, txdw);
   9734 	txmore = wm_txeof(txq, txlimit);
   9735 	/* Fill upper bits with TX index. See below for the lower. */
   9736 	rndval = txq->txq_next * WM_NRXDESC;
   9737 	/* wm_deferred start() is done in wm_handle_queue(). */
   9738 	mutex_exit(txq->txq_lock);
   9739 
   9740 	DPRINTF(WM_DEBUG_RX,
   9741 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9742 	mutex_enter(rxq->rxq_lock);
   9743 
   9744 	if (rxq->rxq_stopping) {
   9745 		mutex_exit(rxq->rxq_lock);
   9746 		return 0;
   9747 	}
   9748 
   9749 	WM_Q_EVCNT_INCR(rxq, intr);
   9750 	rxmore = wm_rxeof(rxq, rxlimit);
   9751 
   9752 	/* Fill lower bits with RX index. See above for the upper. */
   9753 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9754 	mutex_exit(rxq->rxq_lock);
   9755 
   9756 	wm_itrs_writereg(sc, wmq);
   9757 
   9758 	/*
   9759 	 * This function is called in the hardware interrupt context and
   9760 	 * per-CPU, so it's not required to take a lock.
   9761 	 */
   9762 	if (rndval != 0)
   9763 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9764 
   9765 	if (txmore || rxmore) {
   9766 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9767 		wm_sched_handle_queue(sc, wmq);
   9768 	} else
   9769 		wm_txrxintr_enable(wmq);
   9770 
   9771 	return 1;
   9772 }
   9773 
   9774 static void
   9775 wm_handle_queue(void *arg)
   9776 {
   9777 	struct wm_queue *wmq = arg;
   9778 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9779 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9780 	struct wm_softc *sc = txq->txq_sc;
   9781 	u_int txlimit = sc->sc_tx_process_limit;
   9782 	u_int rxlimit = sc->sc_rx_process_limit;
   9783 	bool txmore;
   9784 	bool rxmore;
   9785 
   9786 	mutex_enter(txq->txq_lock);
   9787 	if (txq->txq_stopping) {
   9788 		mutex_exit(txq->txq_lock);
   9789 		return;
   9790 	}
   9791 	txmore = wm_txeof(txq, txlimit);
   9792 	wm_deferred_start_locked(txq);
   9793 	mutex_exit(txq->txq_lock);
   9794 
   9795 	mutex_enter(rxq->rxq_lock);
   9796 	if (rxq->rxq_stopping) {
   9797 		mutex_exit(rxq->rxq_lock);
   9798 		return;
   9799 	}
   9800 	WM_Q_EVCNT_INCR(rxq, defer);
   9801 	rxmore = wm_rxeof(rxq, rxlimit);
   9802 	mutex_exit(rxq->rxq_lock);
   9803 
   9804 	if (txmore || rxmore) {
   9805 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9806 		wm_sched_handle_queue(sc, wmq);
   9807 	} else
   9808 		wm_txrxintr_enable(wmq);
   9809 }
   9810 
   9811 static void
   9812 wm_handle_queue_work(struct work *wk, void *context)
   9813 {
   9814 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9815 
   9816 	/*
   9817 	 * "enqueued flag" is not required here.
   9818 	 */
   9819 	wm_handle_queue(wmq);
   9820 }
   9821 
   9822 /*
   9823  * wm_linkintr_msix:
   9824  *
   9825  *	Interrupt service routine for link status change for MSI-X.
   9826  */
   9827 static int
   9828 wm_linkintr_msix(void *arg)
   9829 {
   9830 	struct wm_softc *sc = arg;
   9831 	uint32_t reg;
   9832 	bool has_rxo;
   9833 
   9834 	reg = CSR_READ(sc, WMREG_ICR);
   9835 	WM_CORE_LOCK(sc);
   9836 	DPRINTF(WM_DEBUG_LINK,
   9837 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9838 		device_xname(sc->sc_dev), reg));
   9839 
   9840 	if (sc->sc_core_stopping)
   9841 		goto out;
   9842 
   9843 	if ((reg & ICR_LSC) != 0) {
   9844 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9845 		wm_linkintr(sc, ICR_LSC);
   9846 	}
   9847 	if ((reg & ICR_GPI(0)) != 0)
   9848 		device_printf(sc->sc_dev, "got module interrupt\n");
   9849 
   9850 	/*
   9851 	 * XXX 82574 MSI-X mode workaround
   9852 	 *
   9853 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
   9854 	 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor
   9855 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
   9856 	 * interrupts by writing WMREG_ICS to process receive packets.
   9857 	 */
   9858 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9859 #if defined(WM_DEBUG)
   9860 		log(LOG_WARNING, "%s: Receive overrun\n",
   9861 		    device_xname(sc->sc_dev));
   9862 #endif /* defined(WM_DEBUG) */
   9863 
   9864 		has_rxo = true;
   9865 		/*
   9866 		 * The RXO interrupt is very high rate when receive traffic is
   9867 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   9868 		 * interrupts. ICR_OTHER will be enabled at the end of
   9869 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9870 		 * ICR_RXQ(1) interrupts.
   9871 		 */
   9872 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9873 
   9874 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9875 	}
   9876 
   9877 
   9878 
   9879 out:
   9880 	WM_CORE_UNLOCK(sc);
   9881 
   9882 	if (sc->sc_type == WM_T_82574) {
   9883 		if (!has_rxo)
   9884 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9885 		else
   9886 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9887 	} else if (sc->sc_type == WM_T_82575)
   9888 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9889 	else
   9890 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9891 
   9892 	return 1;
   9893 }
   9894 
   9895 /*
   9896  * Media related.
   9897  * GMII, SGMII, TBI (and SERDES)
   9898  */
   9899 
   9900 /* Common */
   9901 
   9902 /*
   9903  * wm_tbi_serdes_set_linkled:
   9904  *
   9905  *	Update the link LED on TBI and SERDES devices.
   9906  */
   9907 static void
   9908 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9909 {
   9910 
   9911 	if (sc->sc_tbi_linkup)
   9912 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9913 	else
   9914 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9915 
   9916 	/* 82540 or newer devices are active low */
   9917 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9918 
   9919 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9920 }
   9921 
   9922 /* GMII related */
   9923 
   9924 /*
   9925  * wm_gmii_reset:
   9926  *
   9927  *	Reset the PHY.
   9928  */
   9929 static void
   9930 wm_gmii_reset(struct wm_softc *sc)
   9931 {
   9932 	uint32_t reg;
   9933 	int rv;
   9934 
   9935 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9936 		device_xname(sc->sc_dev), __func__));
   9937 
   9938 	rv = sc->phy.acquire(sc);
   9939 	if (rv != 0) {
   9940 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9941 		    __func__);
   9942 		return;
   9943 	}
   9944 
   9945 	switch (sc->sc_type) {
   9946 	case WM_T_82542_2_0:
   9947 	case WM_T_82542_2_1:
   9948 		/* null */
   9949 		break;
   9950 	case WM_T_82543:
   9951 		/*
   9952 		 * With 82543, we need to force speed and duplex on the MAC
   9953 		 * equal to what the PHY speed and duplex configuration is.
   9954 		 * In addition, we need to perform a hardware reset on the PHY
   9955 		 * to take it out of reset.
   9956 		 */
   9957 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9958 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9959 
   9960 		/* The PHY reset pin is active-low. */
   9961 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9962 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9963 		    CTRL_EXT_SWDPIN(4));
   9964 		reg |= CTRL_EXT_SWDPIO(4);
   9965 
   9966 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9967 		CSR_WRITE_FLUSH(sc);
   9968 		delay(10*1000);
   9969 
   9970 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9971 		CSR_WRITE_FLUSH(sc);
   9972 		delay(150);
   9973 #if 0
   9974 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9975 #endif
   9976 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9977 		break;
   9978 	case WM_T_82544:	/* Reset 10000us */
   9979 	case WM_T_82540:
   9980 	case WM_T_82545:
   9981 	case WM_T_82545_3:
   9982 	case WM_T_82546:
   9983 	case WM_T_82546_3:
   9984 	case WM_T_82541:
   9985 	case WM_T_82541_2:
   9986 	case WM_T_82547:
   9987 	case WM_T_82547_2:
   9988 	case WM_T_82571:	/* Reset 100us */
   9989 	case WM_T_82572:
   9990 	case WM_T_82573:
   9991 	case WM_T_82574:
   9992 	case WM_T_82575:
   9993 	case WM_T_82576:
   9994 	case WM_T_82580:
   9995 	case WM_T_I350:
   9996 	case WM_T_I354:
   9997 	case WM_T_I210:
   9998 	case WM_T_I211:
   9999 	case WM_T_82583:
   10000 	case WM_T_80003:
   10001 		/* Generic reset */
   10002 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10003 		CSR_WRITE_FLUSH(sc);
   10004 		delay(20000);
   10005 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10006 		CSR_WRITE_FLUSH(sc);
   10007 		delay(20000);
   10008 
   10009 		if ((sc->sc_type == WM_T_82541)
   10010 		    || (sc->sc_type == WM_T_82541_2)
   10011 		    || (sc->sc_type == WM_T_82547)
   10012 		    || (sc->sc_type == WM_T_82547_2)) {
   10013 			/* Workaround for igp are done in igp_reset() */
   10014 			/* XXX add code to set LED after phy reset */
   10015 		}
   10016 		break;
   10017 	case WM_T_ICH8:
   10018 	case WM_T_ICH9:
   10019 	case WM_T_ICH10:
   10020 	case WM_T_PCH:
   10021 	case WM_T_PCH2:
   10022 	case WM_T_PCH_LPT:
   10023 	case WM_T_PCH_SPT:
   10024 	case WM_T_PCH_CNP:
   10025 		/* Generic reset */
   10026 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10027 		CSR_WRITE_FLUSH(sc);
   10028 		delay(100);
   10029 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10030 		CSR_WRITE_FLUSH(sc);
   10031 		delay(150);
   10032 		break;
   10033 	default:
   10034 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10035 		    __func__);
   10036 		break;
   10037 	}
   10038 
   10039 	sc->phy.release(sc);
   10040 
   10041 	/* get_cfg_done */
   10042 	wm_get_cfg_done(sc);
   10043 
   10044 	/* Extra setup */
   10045 	switch (sc->sc_type) {
   10046 	case WM_T_82542_2_0:
   10047 	case WM_T_82542_2_1:
   10048 	case WM_T_82543:
   10049 	case WM_T_82544:
   10050 	case WM_T_82540:
   10051 	case WM_T_82545:
   10052 	case WM_T_82545_3:
   10053 	case WM_T_82546:
   10054 	case WM_T_82546_3:
   10055 	case WM_T_82541_2:
   10056 	case WM_T_82547_2:
   10057 	case WM_T_82571:
   10058 	case WM_T_82572:
   10059 	case WM_T_82573:
   10060 	case WM_T_82574:
   10061 	case WM_T_82583:
   10062 	case WM_T_82575:
   10063 	case WM_T_82576:
   10064 	case WM_T_82580:
   10065 	case WM_T_I350:
   10066 	case WM_T_I354:
   10067 	case WM_T_I210:
   10068 	case WM_T_I211:
   10069 	case WM_T_80003:
   10070 		/* Null */
   10071 		break;
   10072 	case WM_T_82541:
   10073 	case WM_T_82547:
   10074 		/* XXX Configure actively LED after PHY reset */
   10075 		break;
   10076 	case WM_T_ICH8:
   10077 	case WM_T_ICH9:
   10078 	case WM_T_ICH10:
   10079 	case WM_T_PCH:
   10080 	case WM_T_PCH2:
   10081 	case WM_T_PCH_LPT:
   10082 	case WM_T_PCH_SPT:
   10083 	case WM_T_PCH_CNP:
   10084 		wm_phy_post_reset(sc);
   10085 		break;
   10086 	default:
   10087 		panic("%s: unknown type\n", __func__);
   10088 		break;
   10089 	}
   10090 }
   10091 
   10092 /*
   10093  * Setup sc_phytype and mii_{read|write}reg.
   10094  *
   10095  *  To identify PHY type, correct read/write function should be selected.
   10096  * To select correct read/write function, PCI ID or MAC type are required
   10097  * without accessing PHY registers.
   10098  *
   10099  *  On the first call of this function, PHY ID is not known yet. Check
   10100  * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
   10101  * result might be incorrect.
   10102  *
   10103  *  In the second call, PHY OUI and model is used to identify PHY type.
   10104  * It might not be perfect because of the lack of compared entry, but it
   10105  * would be better than the first call.
   10106  *
   10107  *  If the detected new result and previous assumption is different,
   10108  * diagnous message will be printed.
   10109  */
   10110 static void
   10111 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10112     uint16_t phy_model)
   10113 {
   10114 	device_t dev = sc->sc_dev;
   10115 	struct mii_data *mii = &sc->sc_mii;
   10116 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10117 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10118 	mii_readreg_t new_readreg;
   10119 	mii_writereg_t new_writereg;
   10120 	bool dodiag = true;
   10121 
   10122 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10123 		device_xname(sc->sc_dev), __func__));
   10124 
   10125 	/*
   10126 	 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always
   10127 	 * incorrect. So don't print diag output when it's 2nd call.
   10128 	 */
   10129 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10130 		dodiag = false;
   10131 
   10132 	if (mii->mii_readreg == NULL) {
   10133 		/*
   10134 		 *  This is the first call of this function. For ICH and PCH
   10135 		 * variants, it's difficult to determine the PHY access method
   10136 		 * by sc_type, so use the PCI product ID for some devices.
   10137 		 */
   10138 
   10139 		switch (sc->sc_pcidevid) {
   10140 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10141 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10142 			/* 82577 */
   10143 			new_phytype = WMPHY_82577;
   10144 			break;
   10145 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10146 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10147 			/* 82578 */
   10148 			new_phytype = WMPHY_82578;
   10149 			break;
   10150 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10151 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10152 			/* 82579 */
   10153 			new_phytype = WMPHY_82579;
   10154 			break;
   10155 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10156 		case PCI_PRODUCT_INTEL_82801I_BM:
   10157 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10158 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10159 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10160 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10161 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10162 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10163 			/* ICH8, 9, 10 with 82567 */
   10164 			new_phytype = WMPHY_BM;
   10165 			break;
   10166 		default:
   10167 			break;
   10168 		}
   10169 	} else {
   10170 		/* It's not the first call. Use PHY OUI and model */
   10171 		switch (phy_oui) {
   10172 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10173 			switch (phy_model) {
   10174 			case 0x0004: /* XXX */
   10175 				new_phytype = WMPHY_82578;
   10176 				break;
   10177 			default:
   10178 				break;
   10179 			}
   10180 			break;
   10181 		case MII_OUI_xxMARVELL:
   10182 			switch (phy_model) {
   10183 			case MII_MODEL_xxMARVELL_I210:
   10184 				new_phytype = WMPHY_I210;
   10185 				break;
   10186 			case MII_MODEL_xxMARVELL_E1011:
   10187 			case MII_MODEL_xxMARVELL_E1000_3:
   10188 			case MII_MODEL_xxMARVELL_E1000_5:
   10189 			case MII_MODEL_xxMARVELL_E1112:
   10190 				new_phytype = WMPHY_M88;
   10191 				break;
   10192 			case MII_MODEL_xxMARVELL_E1149:
   10193 				new_phytype = WMPHY_BM;
   10194 				break;
   10195 			case MII_MODEL_xxMARVELL_E1111:
   10196 			case MII_MODEL_xxMARVELL_I347:
   10197 			case MII_MODEL_xxMARVELL_E1512:
   10198 			case MII_MODEL_xxMARVELL_E1340M:
   10199 			case MII_MODEL_xxMARVELL_E1543:
   10200 				new_phytype = WMPHY_M88;
   10201 				break;
   10202 			case MII_MODEL_xxMARVELL_I82563:
   10203 				new_phytype = WMPHY_GG82563;
   10204 				break;
   10205 			default:
   10206 				break;
   10207 			}
   10208 			break;
   10209 		case MII_OUI_INTEL:
   10210 			switch (phy_model) {
   10211 			case MII_MODEL_INTEL_I82577:
   10212 				new_phytype = WMPHY_82577;
   10213 				break;
   10214 			case MII_MODEL_INTEL_I82579:
   10215 				new_phytype = WMPHY_82579;
   10216 				break;
   10217 			case MII_MODEL_INTEL_I217:
   10218 				new_phytype = WMPHY_I217;
   10219 				break;
   10220 			case MII_MODEL_INTEL_I82580:
   10221 			case MII_MODEL_INTEL_I350:
   10222 				new_phytype = WMPHY_82580;
   10223 				break;
   10224 			default:
   10225 				break;
   10226 			}
   10227 			break;
   10228 		case MII_OUI_yyINTEL:
   10229 			switch (phy_model) {
   10230 			case MII_MODEL_yyINTEL_I82562G:
   10231 			case MII_MODEL_yyINTEL_I82562EM:
   10232 			case MII_MODEL_yyINTEL_I82562ET:
   10233 				new_phytype = WMPHY_IFE;
   10234 				break;
   10235 			case MII_MODEL_yyINTEL_IGP01E1000:
   10236 				new_phytype = WMPHY_IGP;
   10237 				break;
   10238 			case MII_MODEL_yyINTEL_I82566:
   10239 				new_phytype = WMPHY_IGP_3;
   10240 				break;
   10241 			default:
   10242 				break;
   10243 			}
   10244 			break;
   10245 		default:
   10246 			break;
   10247 		}
   10248 
   10249 		if (dodiag) {
   10250 			if (new_phytype == WMPHY_UNKNOWN)
   10251 				aprint_verbose_dev(dev,
   10252 				    "%s: Unknown PHY model. OUI=%06x, "
   10253 				    "model=%04x\n", __func__, phy_oui,
   10254 				    phy_model);
   10255 
   10256 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10257 			    && (sc->sc_phytype != new_phytype)) {
   10258 				aprint_error_dev(dev, "Previously assumed PHY "
   10259 				    "type(%u) was incorrect. PHY type from PHY"
   10260 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10261 			}
   10262 		}
   10263 	}
   10264 
   10265 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10266 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10267 		/* SGMII */
   10268 		new_readreg = wm_sgmii_readreg;
   10269 		new_writereg = wm_sgmii_writereg;
   10270 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10271 		/* BM2 (phyaddr == 1) */
   10272 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10273 		    && (new_phytype != WMPHY_BM)
   10274 		    && (new_phytype != WMPHY_UNKNOWN))
   10275 			doubt_phytype = new_phytype;
   10276 		new_phytype = WMPHY_BM;
   10277 		new_readreg = wm_gmii_bm_readreg;
   10278 		new_writereg = wm_gmii_bm_writereg;
   10279 	} else if (sc->sc_type >= WM_T_PCH) {
   10280 		/* All PCH* use _hv_ */
   10281 		new_readreg = wm_gmii_hv_readreg;
   10282 		new_writereg = wm_gmii_hv_writereg;
   10283 	} else if (sc->sc_type >= WM_T_ICH8) {
   10284 		/* non-82567 ICH8, 9 and 10 */
   10285 		new_readreg = wm_gmii_i82544_readreg;
   10286 		new_writereg = wm_gmii_i82544_writereg;
   10287 	} else if (sc->sc_type >= WM_T_80003) {
   10288 		/* 80003 */
   10289 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10290 		    && (new_phytype != WMPHY_GG82563)
   10291 		    && (new_phytype != WMPHY_UNKNOWN))
   10292 			doubt_phytype = new_phytype;
   10293 		new_phytype = WMPHY_GG82563;
   10294 		new_readreg = wm_gmii_i80003_readreg;
   10295 		new_writereg = wm_gmii_i80003_writereg;
   10296 	} else if (sc->sc_type >= WM_T_I210) {
   10297 		/* I210 and I211 */
   10298 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10299 		    && (new_phytype != WMPHY_I210)
   10300 		    && (new_phytype != WMPHY_UNKNOWN))
   10301 			doubt_phytype = new_phytype;
   10302 		new_phytype = WMPHY_I210;
   10303 		new_readreg = wm_gmii_gs40g_readreg;
   10304 		new_writereg = wm_gmii_gs40g_writereg;
   10305 	} else if (sc->sc_type >= WM_T_82580) {
   10306 		/* 82580, I350 and I354 */
   10307 		new_readreg = wm_gmii_82580_readreg;
   10308 		new_writereg = wm_gmii_82580_writereg;
   10309 	} else if (sc->sc_type >= WM_T_82544) {
   10310 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   10311 		new_readreg = wm_gmii_i82544_readreg;
   10312 		new_writereg = wm_gmii_i82544_writereg;
   10313 	} else {
   10314 		new_readreg = wm_gmii_i82543_readreg;
   10315 		new_writereg = wm_gmii_i82543_writereg;
   10316 	}
   10317 
   10318 	if (new_phytype == WMPHY_BM) {
   10319 		/* All BM use _bm_ */
   10320 		new_readreg = wm_gmii_bm_readreg;
   10321 		new_writereg = wm_gmii_bm_writereg;
   10322 	}
   10323 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10324 		/* All PCH* use _hv_ */
   10325 		new_readreg = wm_gmii_hv_readreg;
   10326 		new_writereg = wm_gmii_hv_writereg;
   10327 	}
   10328 
   10329 	/* Diag output */
   10330 	if (dodiag) {
   10331 		if (doubt_phytype != WMPHY_UNKNOWN)
   10332 			aprint_error_dev(dev, "Assumed new PHY type was "
   10333 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10334 			    new_phytype);
   10335 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10336 		    && (sc->sc_phytype != new_phytype))
   10337 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   10338 			    "was incorrect. New PHY type = %u\n",
   10339 			    sc->sc_phytype, new_phytype);
   10340 
   10341 		if ((mii->mii_readreg != NULL) &&
   10342 		    (new_phytype == WMPHY_UNKNOWN))
   10343 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10344 
   10345 		if ((mii->mii_readreg != NULL) &&
   10346 		    (mii->mii_readreg != new_readreg))
   10347 			aprint_error_dev(dev, "Previously assumed PHY "
   10348 			    "read/write function was incorrect.\n");
   10349 	}
   10350 
   10351 	/* Update now */
   10352 	sc->sc_phytype = new_phytype;
   10353 	mii->mii_readreg = new_readreg;
   10354 	mii->mii_writereg = new_writereg;
   10355 	if (new_readreg == wm_gmii_hv_readreg) {
   10356 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10357 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10358 	} else if (new_readreg == wm_sgmii_readreg) {
   10359 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10360 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10361 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10362 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10363 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10364 	}
   10365 }
   10366 
   10367 /*
   10368  * wm_get_phy_id_82575:
   10369  *
   10370  * Return PHY ID. Return -1 if it failed.
   10371  */
   10372 static int
   10373 wm_get_phy_id_82575(struct wm_softc *sc)
   10374 {
   10375 	uint32_t reg;
   10376 	int phyid = -1;
   10377 
   10378 	/* XXX */
   10379 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10380 		return -1;
   10381 
   10382 	if (wm_sgmii_uses_mdio(sc)) {
   10383 		switch (sc->sc_type) {
   10384 		case WM_T_82575:
   10385 		case WM_T_82576:
   10386 			reg = CSR_READ(sc, WMREG_MDIC);
   10387 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10388 			break;
   10389 		case WM_T_82580:
   10390 		case WM_T_I350:
   10391 		case WM_T_I354:
   10392 		case WM_T_I210:
   10393 		case WM_T_I211:
   10394 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10395 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10396 			break;
   10397 		default:
   10398 			return -1;
   10399 		}
   10400 	}
   10401 
   10402 	return phyid;
   10403 }
   10404 
   10405 
   10406 /*
   10407  * wm_gmii_mediainit:
   10408  *
   10409  *	Initialize media for use on 1000BASE-T devices.
   10410  */
   10411 static void
   10412 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10413 {
   10414 	device_t dev = sc->sc_dev;
   10415 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10416 	struct mii_data *mii = &sc->sc_mii;
   10417 
   10418 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10419 		device_xname(sc->sc_dev), __func__));
   10420 
   10421 	/* We have GMII. */
   10422 	sc->sc_flags |= WM_F_HAS_MII;
   10423 
   10424 	if (sc->sc_type == WM_T_80003)
   10425 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10426 	else
   10427 		sc->sc_tipg = TIPG_1000T_DFLT;
   10428 
   10429 	/*
   10430 	 * Let the chip set speed/duplex on its own based on
   10431 	 * signals from the PHY.
   10432 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10433 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10434 	 */
   10435 	sc->sc_ctrl |= CTRL_SLU;
   10436 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10437 
   10438 	/* Initialize our media structures and probe the GMII. */
   10439 	mii->mii_ifp = ifp;
   10440 
   10441 	mii->mii_statchg = wm_gmii_statchg;
   10442 
   10443 	/* get PHY control from SMBus to PCIe */
   10444 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10445 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10446 	    || (sc->sc_type == WM_T_PCH_CNP))
   10447 		wm_init_phy_workarounds_pchlan(sc);
   10448 
   10449 	wm_gmii_reset(sc);
   10450 
   10451 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10452 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10453 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10454 
   10455 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10456 	    || (sc->sc_type == WM_T_82580)
   10457 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10458 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10459 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10460 			/* Attach only one port */
   10461 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10462 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10463 		} else {
   10464 			int i, id;
   10465 			uint32_t ctrl_ext;
   10466 
   10467 			id = wm_get_phy_id_82575(sc);
   10468 			if (id != -1) {
   10469 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10470 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10471 			}
   10472 			if ((id == -1)
   10473 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10474 				/* Power on sgmii phy if it is disabled */
   10475 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10476 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10477 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10478 				CSR_WRITE_FLUSH(sc);
   10479 				delay(300*1000); /* XXX too long */
   10480 
   10481 				/*
   10482 				 * From 1 to 8.
   10483 				 *
   10484 				 * I2C access fails with I2C register's ERROR
   10485 				 * bit set, so prevent error message while
   10486 				 * scanning.
   10487 				 */
   10488 				sc->phy.no_errprint = true;
   10489 				for (i = 1; i < 8; i++)
   10490 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10491 					    0xffffffff, i, MII_OFFSET_ANY,
   10492 					    MIIF_DOPAUSE);
   10493 				sc->phy.no_errprint = false;
   10494 
   10495 				/* Restore previous sfp cage power state */
   10496 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10497 			}
   10498 		}
   10499 	} else
   10500 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10501 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10502 
   10503 	/*
   10504 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10505 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10506 	 */
   10507 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10508 		|| (sc->sc_type == WM_T_PCH_SPT)
   10509 		|| (sc->sc_type == WM_T_PCH_CNP))
   10510 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10511 		wm_set_mdio_slow_mode_hv(sc);
   10512 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10513 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10514 	}
   10515 
   10516 	/*
   10517 	 * (For ICH8 variants)
   10518 	 * If PHY detection failed, use BM's r/w function and retry.
   10519 	 */
   10520 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10521 		/* if failed, retry with *_bm_* */
   10522 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10523 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10524 		    sc->sc_phytype);
   10525 		sc->sc_phytype = WMPHY_BM;
   10526 		mii->mii_readreg = wm_gmii_bm_readreg;
   10527 		mii->mii_writereg = wm_gmii_bm_writereg;
   10528 
   10529 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10530 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10531 	}
   10532 
   10533 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10534 		/* Any PHY wasn't find */
   10535 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10536 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10537 		sc->sc_phytype = WMPHY_NONE;
   10538 	} else {
   10539 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10540 
   10541 		/*
   10542 		 * PHY Found! Check PHY type again by the second call of
   10543 		 * wm_gmii_setup_phytype.
   10544 		 */
   10545 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10546 		    child->mii_mpd_model);
   10547 
   10548 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10549 	}
   10550 }
   10551 
   10552 /*
   10553  * wm_gmii_mediachange:	[ifmedia interface function]
   10554  *
   10555  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10556  */
   10557 static int
   10558 wm_gmii_mediachange(struct ifnet *ifp)
   10559 {
   10560 	struct wm_softc *sc = ifp->if_softc;
   10561 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10562 	uint32_t reg;
   10563 	int rc;
   10564 
   10565 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10566 		device_xname(sc->sc_dev), __func__));
   10567 	if ((ifp->if_flags & IFF_UP) == 0)
   10568 		return 0;
   10569 
   10570 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10571 	if ((sc->sc_type == WM_T_82580)
   10572 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10573 	    || (sc->sc_type == WM_T_I211)) {
   10574 		reg = CSR_READ(sc, WMREG_PHPM);
   10575 		reg &= ~PHPM_GO_LINK_D;
   10576 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10577 	}
   10578 
   10579 	/* Disable D0 LPLU. */
   10580 	wm_lplu_d0_disable(sc);
   10581 
   10582 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10583 	sc->sc_ctrl |= CTRL_SLU;
   10584 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10585 	    || (sc->sc_type > WM_T_82543)) {
   10586 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10587 	} else {
   10588 		sc->sc_ctrl &= ~CTRL_ASDE;
   10589 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10590 		if (ife->ifm_media & IFM_FDX)
   10591 			sc->sc_ctrl |= CTRL_FD;
   10592 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10593 		case IFM_10_T:
   10594 			sc->sc_ctrl |= CTRL_SPEED_10;
   10595 			break;
   10596 		case IFM_100_TX:
   10597 			sc->sc_ctrl |= CTRL_SPEED_100;
   10598 			break;
   10599 		case IFM_1000_T:
   10600 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10601 			break;
   10602 		case IFM_NONE:
   10603 			/* There is no specific setting for IFM_NONE */
   10604 			break;
   10605 		default:
   10606 			panic("wm_gmii_mediachange: bad media 0x%x",
   10607 			    ife->ifm_media);
   10608 		}
   10609 	}
   10610 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10611 	CSR_WRITE_FLUSH(sc);
   10612 
   10613 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10614 		wm_serdes_mediachange(ifp);
   10615 
   10616 	if (sc->sc_type <= WM_T_82543)
   10617 		wm_gmii_reset(sc);
   10618 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10619 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
   10620 		/* allow time for SFP cage time to power up phy */
   10621 		delay(300 * 1000);
   10622 		wm_gmii_reset(sc);
   10623 	}
   10624 
   10625 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10626 		return 0;
   10627 	return rc;
   10628 }
   10629 
   10630 /*
   10631  * wm_gmii_mediastatus:	[ifmedia interface function]
   10632  *
   10633  *	Get the current interface media status on a 1000BASE-T device.
   10634  */
   10635 static void
   10636 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10637 {
   10638 	struct wm_softc *sc = ifp->if_softc;
   10639 
   10640 	ether_mediastatus(ifp, ifmr);
   10641 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10642 	    | sc->sc_flowflags;
   10643 }
   10644 
   10645 #define	MDI_IO		CTRL_SWDPIN(2)
   10646 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10647 #define	MDI_CLK		CTRL_SWDPIN(3)
   10648 
   10649 static void
   10650 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10651 {
   10652 	uint32_t i, v;
   10653 
   10654 	v = CSR_READ(sc, WMREG_CTRL);
   10655 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10656 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10657 
   10658 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10659 		if (data & i)
   10660 			v |= MDI_IO;
   10661 		else
   10662 			v &= ~MDI_IO;
   10663 		CSR_WRITE(sc, WMREG_CTRL, v);
   10664 		CSR_WRITE_FLUSH(sc);
   10665 		delay(10);
   10666 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10667 		CSR_WRITE_FLUSH(sc);
   10668 		delay(10);
   10669 		CSR_WRITE(sc, WMREG_CTRL, v);
   10670 		CSR_WRITE_FLUSH(sc);
   10671 		delay(10);
   10672 	}
   10673 }
   10674 
   10675 static uint16_t
   10676 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10677 {
   10678 	uint32_t v, i;
   10679 	uint16_t data = 0;
   10680 
   10681 	v = CSR_READ(sc, WMREG_CTRL);
   10682 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10683 	v |= CTRL_SWDPIO(3);
   10684 
   10685 	CSR_WRITE(sc, WMREG_CTRL, v);
   10686 	CSR_WRITE_FLUSH(sc);
   10687 	delay(10);
   10688 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10689 	CSR_WRITE_FLUSH(sc);
   10690 	delay(10);
   10691 	CSR_WRITE(sc, WMREG_CTRL, v);
   10692 	CSR_WRITE_FLUSH(sc);
   10693 	delay(10);
   10694 
   10695 	for (i = 0; i < 16; i++) {
   10696 		data <<= 1;
   10697 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10698 		CSR_WRITE_FLUSH(sc);
   10699 		delay(10);
   10700 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10701 			data |= 1;
   10702 		CSR_WRITE(sc, WMREG_CTRL, v);
   10703 		CSR_WRITE_FLUSH(sc);
   10704 		delay(10);
   10705 	}
   10706 
   10707 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10708 	CSR_WRITE_FLUSH(sc);
   10709 	delay(10);
   10710 	CSR_WRITE(sc, WMREG_CTRL, v);
   10711 	CSR_WRITE_FLUSH(sc);
   10712 	delay(10);
   10713 
   10714 	return data;
   10715 }
   10716 
   10717 #undef MDI_IO
   10718 #undef MDI_DIR
   10719 #undef MDI_CLK
   10720 
   10721 /*
   10722  * wm_gmii_i82543_readreg:	[mii interface function]
   10723  *
   10724  *	Read a PHY register on the GMII (i82543 version).
   10725  */
   10726 static int
   10727 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10728 {
   10729 	struct wm_softc *sc = device_private(dev);
   10730 
   10731 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10732 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10733 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10734 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10735 
   10736 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10737 		device_xname(dev), phy, reg, *val));
   10738 
   10739 	return 0;
   10740 }
   10741 
   10742 /*
   10743  * wm_gmii_i82543_writereg:	[mii interface function]
   10744  *
   10745  *	Write a PHY register on the GMII (i82543 version).
   10746  */
   10747 static int
   10748 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10749 {
   10750 	struct wm_softc *sc = device_private(dev);
   10751 
   10752 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10753 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10754 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10755 	    (MII_COMMAND_START << 30), 32);
   10756 
   10757 	return 0;
   10758 }
   10759 
   10760 /*
   10761  * wm_gmii_mdic_readreg:	[mii interface function]
   10762  *
   10763  *	Read a PHY register on the GMII.
   10764  */
   10765 static int
   10766 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10767 {
   10768 	struct wm_softc *sc = device_private(dev);
   10769 	uint32_t mdic = 0;
   10770 	int i;
   10771 
   10772 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10773 	    && (reg > MII_ADDRMASK)) {
   10774 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10775 		    __func__, sc->sc_phytype, reg);
   10776 		reg &= MII_ADDRMASK;
   10777 	}
   10778 
   10779 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10780 	    MDIC_REGADD(reg));
   10781 
   10782 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10783 		delay(50);
   10784 		mdic = CSR_READ(sc, WMREG_MDIC);
   10785 		if (mdic & MDIC_READY)
   10786 			break;
   10787 	}
   10788 
   10789 	if ((mdic & MDIC_READY) == 0) {
   10790 		DPRINTF(WM_DEBUG_GMII,
   10791 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10792 			device_xname(dev), phy, reg));
   10793 		return ETIMEDOUT;
   10794 	} else if (mdic & MDIC_E) {
   10795 		/* This is normal if no PHY is present. */
   10796 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10797 			device_xname(sc->sc_dev), phy, reg));
   10798 		return -1;
   10799 	} else
   10800 		*val = MDIC_DATA(mdic);
   10801 
   10802 	/*
   10803 	 * Allow some time after each MDIC transaction to avoid
   10804 	 * reading duplicate data in the next MDIC transaction.
   10805 	 */
   10806 	if (sc->sc_type == WM_T_PCH2)
   10807 		delay(100);
   10808 
   10809 	return 0;
   10810 }
   10811 
   10812 /*
   10813  * wm_gmii_mdic_writereg:	[mii interface function]
   10814  *
   10815  *	Write a PHY register on the GMII.
   10816  */
   10817 static int
   10818 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10819 {
   10820 	struct wm_softc *sc = device_private(dev);
   10821 	uint32_t mdic = 0;
   10822 	int i;
   10823 
   10824 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10825 	    && (reg > MII_ADDRMASK)) {
   10826 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10827 		    __func__, sc->sc_phytype, reg);
   10828 		reg &= MII_ADDRMASK;
   10829 	}
   10830 
   10831 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10832 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10833 
   10834 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10835 		delay(50);
   10836 		mdic = CSR_READ(sc, WMREG_MDIC);
   10837 		if (mdic & MDIC_READY)
   10838 			break;
   10839 	}
   10840 
   10841 	if ((mdic & MDIC_READY) == 0) {
   10842 		DPRINTF(WM_DEBUG_GMII,
   10843 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10844 			device_xname(dev), phy, reg));
   10845 		return ETIMEDOUT;
   10846 	} else if (mdic & MDIC_E) {
   10847 		DPRINTF(WM_DEBUG_GMII,
   10848 		    ("%s: MDIC write error: phy %d reg %d\n",
   10849 			device_xname(dev), phy, reg));
   10850 		return -1;
   10851 	}
   10852 
   10853 	/*
   10854 	 * Allow some time after each MDIC transaction to avoid
   10855 	 * reading duplicate data in the next MDIC transaction.
   10856 	 */
   10857 	if (sc->sc_type == WM_T_PCH2)
   10858 		delay(100);
   10859 
   10860 	return 0;
   10861 }
   10862 
   10863 /*
   10864  * wm_gmii_i82544_readreg:	[mii interface function]
   10865  *
   10866  *	Read a PHY register on the GMII.
   10867  */
   10868 static int
   10869 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10870 {
   10871 	struct wm_softc *sc = device_private(dev);
   10872 	int rv;
   10873 
   10874 	if (sc->phy.acquire(sc)) {
   10875 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10876 		return -1;
   10877 	}
   10878 
   10879 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10880 
   10881 	sc->phy.release(sc);
   10882 
   10883 	return rv;
   10884 }
   10885 
   10886 static int
   10887 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10888 {
   10889 	struct wm_softc *sc = device_private(dev);
   10890 	int rv;
   10891 
   10892 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10893 		switch (sc->sc_phytype) {
   10894 		case WMPHY_IGP:
   10895 		case WMPHY_IGP_2:
   10896 		case WMPHY_IGP_3:
   10897 			rv = wm_gmii_mdic_writereg(dev, phy,
   10898 			    MII_IGPHY_PAGE_SELECT, reg);
   10899 			if (rv != 0)
   10900 				return rv;
   10901 			break;
   10902 		default:
   10903 #ifdef WM_DEBUG
   10904 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10905 			    __func__, sc->sc_phytype, reg);
   10906 #endif
   10907 			break;
   10908 		}
   10909 	}
   10910 
   10911 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10912 }
   10913 
   10914 /*
   10915  * wm_gmii_i82544_writereg:	[mii interface function]
   10916  *
   10917  *	Write a PHY register on the GMII.
   10918  */
   10919 static int
   10920 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10921 {
   10922 	struct wm_softc *sc = device_private(dev);
   10923 	int rv;
   10924 
   10925 	if (sc->phy.acquire(sc)) {
   10926 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10927 		return -1;
   10928 	}
   10929 
   10930 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10931 	sc->phy.release(sc);
   10932 
   10933 	return rv;
   10934 }
   10935 
   10936 static int
   10937 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10938 {
   10939 	struct wm_softc *sc = device_private(dev);
   10940 	int rv;
   10941 
   10942 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10943 		switch (sc->sc_phytype) {
   10944 		case WMPHY_IGP:
   10945 		case WMPHY_IGP_2:
   10946 		case WMPHY_IGP_3:
   10947 			rv = wm_gmii_mdic_writereg(dev, phy,
   10948 			    MII_IGPHY_PAGE_SELECT, reg);
   10949 			if (rv != 0)
   10950 				return rv;
   10951 			break;
   10952 		default:
   10953 #ifdef WM_DEBUG
   10954 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10955 			    __func__, sc->sc_phytype, reg);
   10956 #endif
   10957 			break;
   10958 		}
   10959 	}
   10960 
   10961 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10962 }
   10963 
   10964 /*
   10965  * wm_gmii_i80003_readreg:	[mii interface function]
   10966  *
   10967  *	Read a PHY register on the kumeran
   10968  * This could be handled by the PHY layer if we didn't have to lock the
   10969  * ressource ...
   10970  */
   10971 static int
   10972 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10973 {
   10974 	struct wm_softc *sc = device_private(dev);
   10975 	int page_select;
   10976 	uint16_t temp, temp2;
   10977 	int rv = 0;
   10978 
   10979 	if (phy != 1) /* Only one PHY on kumeran bus */
   10980 		return -1;
   10981 
   10982 	if (sc->phy.acquire(sc)) {
   10983 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10984 		return -1;
   10985 	}
   10986 
   10987 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10988 		page_select = GG82563_PHY_PAGE_SELECT;
   10989 	else {
   10990 		/*
   10991 		 * Use Alternative Page Select register to access registers
   10992 		 * 30 and 31.
   10993 		 */
   10994 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10995 	}
   10996 	temp = reg >> GG82563_PAGE_SHIFT;
   10997 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10998 		goto out;
   10999 
   11000 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11001 		/*
   11002 		 * Wait more 200us for a bug of the ready bit in the MDIC
   11003 		 * register.
   11004 		 */
   11005 		delay(200);
   11006 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11007 		if ((rv != 0) || (temp2 != temp)) {
   11008 			device_printf(dev, "%s failed\n", __func__);
   11009 			rv = -1;
   11010 			goto out;
   11011 		}
   11012 		delay(200);
   11013 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11014 		delay(200);
   11015 	} else
   11016 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11017 
   11018 out:
   11019 	sc->phy.release(sc);
   11020 	return rv;
   11021 }
   11022 
   11023 /*
   11024  * wm_gmii_i80003_writereg:	[mii interface function]
   11025  *
   11026  *	Write a PHY register on the kumeran.
   11027  * This could be handled by the PHY layer if we didn't have to lock the
   11028  * ressource ...
   11029  */
   11030 static int
   11031 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11032 {
   11033 	struct wm_softc *sc = device_private(dev);
   11034 	int page_select, rv;
   11035 	uint16_t temp, temp2;
   11036 
   11037 	if (phy != 1) /* Only one PHY on kumeran bus */
   11038 		return -1;
   11039 
   11040 	if (sc->phy.acquire(sc)) {
   11041 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11042 		return -1;
   11043 	}
   11044 
   11045 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11046 		page_select = GG82563_PHY_PAGE_SELECT;
   11047 	else {
   11048 		/*
   11049 		 * Use Alternative Page Select register to access registers
   11050 		 * 30 and 31.
   11051 		 */
   11052 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11053 	}
   11054 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11055 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11056 		goto out;
   11057 
   11058 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11059 		/*
   11060 		 * Wait more 200us for a bug of the ready bit in the MDIC
   11061 		 * register.
   11062 		 */
   11063 		delay(200);
   11064 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11065 		if ((rv != 0) || (temp2 != temp)) {
   11066 			device_printf(dev, "%s failed\n", __func__);
   11067 			rv = -1;
   11068 			goto out;
   11069 		}
   11070 		delay(200);
   11071 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11072 		delay(200);
   11073 	} else
   11074 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11075 
   11076 out:
   11077 	sc->phy.release(sc);
   11078 	return rv;
   11079 }
   11080 
   11081 /*
   11082  * wm_gmii_bm_readreg:	[mii interface function]
   11083  *
   11084  *	Read a PHY register on the kumeran
   11085  * This could be handled by the PHY layer if we didn't have to lock the
   11086  * ressource ...
   11087  */
   11088 static int
   11089 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11090 {
   11091 	struct wm_softc *sc = device_private(dev);
   11092 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11093 	int rv;
   11094 
   11095 	if (sc->phy.acquire(sc)) {
   11096 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11097 		return -1;
   11098 	}
   11099 
   11100 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11101 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11102 		    || (reg == 31)) ? 1 : phy;
   11103 	/* Page 800 works differently than the rest so it has its own func */
   11104 	if (page == BM_WUC_PAGE) {
   11105 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11106 		goto release;
   11107 	}
   11108 
   11109 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11110 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11111 		    && (sc->sc_type != WM_T_82583))
   11112 			rv = wm_gmii_mdic_writereg(dev, phy,
   11113 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11114 		else
   11115 			rv = wm_gmii_mdic_writereg(dev, phy,
   11116 			    BME1000_PHY_PAGE_SELECT, page);
   11117 		if (rv != 0)
   11118 			goto release;
   11119 	}
   11120 
   11121 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11122 
   11123 release:
   11124 	sc->phy.release(sc);
   11125 	return rv;
   11126 }
   11127 
   11128 /*
   11129  * wm_gmii_bm_writereg:	[mii interface function]
   11130  *
   11131  *	Write a PHY register on the kumeran.
   11132  * This could be handled by the PHY layer if we didn't have to lock the
   11133  * ressource ...
   11134  */
   11135 static int
   11136 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11137 {
   11138 	struct wm_softc *sc = device_private(dev);
   11139 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11140 	int rv;
   11141 
   11142 	if (sc->phy.acquire(sc)) {
   11143 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11144 		return -1;
   11145 	}
   11146 
   11147 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11148 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11149 		    || (reg == 31)) ? 1 : phy;
   11150 	/* Page 800 works differently than the rest so it has its own func */
   11151 	if (page == BM_WUC_PAGE) {
   11152 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11153 		goto release;
   11154 	}
   11155 
   11156 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11157 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11158 		    && (sc->sc_type != WM_T_82583))
   11159 			rv = wm_gmii_mdic_writereg(dev, phy,
   11160 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11161 		else
   11162 			rv = wm_gmii_mdic_writereg(dev, phy,
   11163 			    BME1000_PHY_PAGE_SELECT, page);
   11164 		if (rv != 0)
   11165 			goto release;
   11166 	}
   11167 
   11168 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11169 
   11170 release:
   11171 	sc->phy.release(sc);
   11172 	return rv;
   11173 }
   11174 
   11175 /*
   11176  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11177  *  @dev: pointer to the HW structure
   11178  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11179  *
   11180  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11181  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11182  */
   11183 static int
   11184 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11185 {
   11186 	uint16_t temp;
   11187 	int rv;
   11188 
   11189 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11190 		device_xname(dev), __func__));
   11191 
   11192 	if (!phy_regp)
   11193 		return -1;
   11194 
   11195 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11196 
   11197 	/* Select Port Control Registers page */
   11198 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11199 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11200 	if (rv != 0)
   11201 		return rv;
   11202 
   11203 	/* Read WUCE and save it */
   11204 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11205 	if (rv != 0)
   11206 		return rv;
   11207 
   11208 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11209 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11210 	 */
   11211 	temp = *phy_regp;
   11212 	temp |= BM_WUC_ENABLE_BIT;
   11213 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11214 
   11215 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11216 		return rv;
   11217 
   11218 	/* Select Host Wakeup Registers page - caller now able to write
   11219 	 * registers on the Wakeup registers page
   11220 	 */
   11221 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11222 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11223 }
   11224 
   11225 /*
   11226  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11227  *  @dev: pointer to the HW structure
   11228  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11229  *
   11230  *  Restore BM_WUC_ENABLE_REG to its original value.
   11231  *
   11232  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11233  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11234  *  caller.
   11235  */
   11236 static int
   11237 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11238 {
   11239 
   11240 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11241 		device_xname(dev), __func__));
   11242 
   11243 	if (!phy_regp)
   11244 		return -1;
   11245 
   11246 	/* Select Port Control Registers page */
   11247 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11248 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11249 
   11250 	/* Restore 769.17 to its original value */
   11251 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11252 
   11253 	return 0;
   11254 }
   11255 
   11256 /*
   11257  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11258  *  @sc: pointer to the HW structure
   11259  *  @offset: register offset to be read or written
   11260  *  @val: pointer to the data to read or write
   11261  *  @rd: determines if operation is read or write
   11262  *  @page_set: BM_WUC_PAGE already set and access enabled
   11263  *
   11264  *  Read the PHY register at offset and store the retrieved information in
   11265  *  data, or write data to PHY register at offset.  Note the procedure to
   11266  *  access the PHY wakeup registers is different than reading the other PHY
   11267  *  registers. It works as such:
   11268  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11269  *  2) Set page to 800 for host (801 if we were manageability)
   11270  *  3) Write the address using the address opcode (0x11)
   11271  *  4) Read or write the data using the data opcode (0x12)
   11272  *  5) Restore 769.17.2 to its original value
   11273  *
   11274  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11275  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11276  *
   11277  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11278  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11279  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11280  */
   11281 static int
   11282 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   11283 	bool page_set)
   11284 {
   11285 	struct wm_softc *sc = device_private(dev);
   11286 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11287 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11288 	uint16_t wuce;
   11289 	int rv = 0;
   11290 
   11291 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11292 		device_xname(dev), __func__));
   11293 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11294 	if ((sc->sc_type == WM_T_PCH)
   11295 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11296 		device_printf(dev,
   11297 		    "Attempting to access page %d while gig enabled.\n", page);
   11298 	}
   11299 
   11300 	if (!page_set) {
   11301 		/* Enable access to PHY wakeup registers */
   11302 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11303 		if (rv != 0) {
   11304 			device_printf(dev,
   11305 			    "%s: Could not enable PHY wakeup reg access\n",
   11306 			    __func__);
   11307 			return rv;
   11308 		}
   11309 	}
   11310 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11311 		device_xname(sc->sc_dev), __func__, page, regnum));
   11312 
   11313 	/*
   11314 	 * 2) Access PHY wakeup register.
   11315 	 * See wm_access_phy_wakeup_reg_bm.
   11316 	 */
   11317 
   11318 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11319 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11320 	if (rv != 0)
   11321 		return rv;
   11322 
   11323 	if (rd) {
   11324 		/* Read the Wakeup register page value using opcode 0x12 */
   11325 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11326 	} else {
   11327 		/* Write the Wakeup register page value using opcode 0x12 */
   11328 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11329 	}
   11330 	if (rv != 0)
   11331 		return rv;
   11332 
   11333 	if (!page_set)
   11334 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11335 
   11336 	return rv;
   11337 }
   11338 
   11339 /*
   11340  * wm_gmii_hv_readreg:	[mii interface function]
   11341  *
   11342  *	Read a PHY register on the kumeran
   11343  * This could be handled by the PHY layer if we didn't have to lock the
   11344  * ressource ...
   11345  */
   11346 static int
   11347 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11348 {
   11349 	struct wm_softc *sc = device_private(dev);
   11350 	int rv;
   11351 
   11352 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11353 		device_xname(dev), __func__));
   11354 	if (sc->phy.acquire(sc)) {
   11355 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11356 		return -1;
   11357 	}
   11358 
   11359 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11360 	sc->phy.release(sc);
   11361 	return rv;
   11362 }
   11363 
   11364 static int
   11365 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11366 {
   11367 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11368 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11369 	int rv;
   11370 
   11371 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11372 
   11373 	/* Page 800 works differently than the rest so it has its own func */
   11374 	if (page == BM_WUC_PAGE)
   11375 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11376 
   11377 	/*
   11378 	 * Lower than page 768 works differently than the rest so it has its
   11379 	 * own func
   11380 	 */
   11381 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11382 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11383 		return -1;
   11384 	}
   11385 
   11386 	/*
   11387 	 * XXX I21[789] documents say that the SMBus Address register is at
   11388 	 * PHY address 01, Page 0 (not 768), Register 26.
   11389 	 */
   11390 	if (page == HV_INTC_FC_PAGE_START)
   11391 		page = 0;
   11392 
   11393 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11394 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11395 		    page << BME1000_PAGE_SHIFT);
   11396 		if (rv != 0)
   11397 			return rv;
   11398 	}
   11399 
   11400 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11401 }
   11402 
   11403 /*
   11404  * wm_gmii_hv_writereg:	[mii interface function]
   11405  *
   11406  *	Write a PHY register on the kumeran.
   11407  * This could be handled by the PHY layer if we didn't have to lock the
   11408  * ressource ...
   11409  */
   11410 static int
   11411 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11412 {
   11413 	struct wm_softc *sc = device_private(dev);
   11414 	int rv;
   11415 
   11416 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11417 		device_xname(dev), __func__));
   11418 
   11419 	if (sc->phy.acquire(sc)) {
   11420 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11421 		return -1;
   11422 	}
   11423 
   11424 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11425 	sc->phy.release(sc);
   11426 
   11427 	return rv;
   11428 }
   11429 
   11430 static int
   11431 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11432 {
   11433 	struct wm_softc *sc = device_private(dev);
   11434 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11435 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11436 	int rv;
   11437 
   11438 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11439 
   11440 	/* Page 800 works differently than the rest so it has its own func */
   11441 	if (page == BM_WUC_PAGE)
   11442 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11443 		    false);
   11444 
   11445 	/*
   11446 	 * Lower than page 768 works differently than the rest so it has its
   11447 	 * own func
   11448 	 */
   11449 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11450 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11451 		return -1;
   11452 	}
   11453 
   11454 	{
   11455 		/*
   11456 		 * XXX I21[789] documents say that the SMBus Address register
   11457 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11458 		 */
   11459 		if (page == HV_INTC_FC_PAGE_START)
   11460 			page = 0;
   11461 
   11462 		/*
   11463 		 * XXX Workaround MDIO accesses being disabled after entering
   11464 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11465 		 * register is set)
   11466 		 */
   11467 		if (sc->sc_phytype == WMPHY_82578) {
   11468 			struct mii_softc *child;
   11469 
   11470 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11471 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11472 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11473 			    && ((val & (1 << 11)) != 0)) {
   11474 				device_printf(dev, "XXX need workaround\n");
   11475 			}
   11476 		}
   11477 
   11478 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11479 			rv = wm_gmii_mdic_writereg(dev, 1,
   11480 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11481 			if (rv != 0)
   11482 				return rv;
   11483 		}
   11484 	}
   11485 
   11486 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11487 }
   11488 
   11489 /*
   11490  * wm_gmii_82580_readreg:	[mii interface function]
   11491  *
   11492  *	Read a PHY register on the 82580 and I350.
   11493  * This could be handled by the PHY layer if we didn't have to lock the
   11494  * ressource ...
   11495  */
   11496 static int
   11497 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11498 {
   11499 	struct wm_softc *sc = device_private(dev);
   11500 	int rv;
   11501 
   11502 	if (sc->phy.acquire(sc) != 0) {
   11503 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11504 		return -1;
   11505 	}
   11506 
   11507 #ifdef DIAGNOSTIC
   11508 	if (reg > MII_ADDRMASK) {
   11509 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11510 		    __func__, sc->sc_phytype, reg);
   11511 		reg &= MII_ADDRMASK;
   11512 	}
   11513 #endif
   11514 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11515 
   11516 	sc->phy.release(sc);
   11517 	return rv;
   11518 }
   11519 
   11520 /*
   11521  * wm_gmii_82580_writereg:	[mii interface function]
   11522  *
   11523  *	Write a PHY register on the 82580 and I350.
   11524  * This could be handled by the PHY layer if we didn't have to lock the
   11525  * ressource ...
   11526  */
   11527 static int
   11528 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11529 {
   11530 	struct wm_softc *sc = device_private(dev);
   11531 	int rv;
   11532 
   11533 	if (sc->phy.acquire(sc) != 0) {
   11534 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11535 		return -1;
   11536 	}
   11537 
   11538 #ifdef DIAGNOSTIC
   11539 	if (reg > MII_ADDRMASK) {
   11540 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11541 		    __func__, sc->sc_phytype, reg);
   11542 		reg &= MII_ADDRMASK;
   11543 	}
   11544 #endif
   11545 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11546 
   11547 	sc->phy.release(sc);
   11548 	return rv;
   11549 }
   11550 
   11551 /*
   11552  * wm_gmii_gs40g_readreg:	[mii interface function]
   11553  *
   11554  *	Read a PHY register on the I2100 and I211.
   11555  * This could be handled by the PHY layer if we didn't have to lock the
   11556  * ressource ...
   11557  */
   11558 static int
   11559 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11560 {
   11561 	struct wm_softc *sc = device_private(dev);
   11562 	int page, offset;
   11563 	int rv;
   11564 
   11565 	/* Acquire semaphore */
   11566 	if (sc->phy.acquire(sc)) {
   11567 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11568 		return -1;
   11569 	}
   11570 
   11571 	/* Page select */
   11572 	page = reg >> GS40G_PAGE_SHIFT;
   11573 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11574 	if (rv != 0)
   11575 		goto release;
   11576 
   11577 	/* Read reg */
   11578 	offset = reg & GS40G_OFFSET_MASK;
   11579 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11580 
   11581 release:
   11582 	sc->phy.release(sc);
   11583 	return rv;
   11584 }
   11585 
   11586 /*
   11587  * wm_gmii_gs40g_writereg:	[mii interface function]
   11588  *
   11589  *	Write a PHY register on the I210 and I211.
   11590  * This could be handled by the PHY layer if we didn't have to lock the
   11591  * ressource ...
   11592  */
   11593 static int
   11594 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11595 {
   11596 	struct wm_softc *sc = device_private(dev);
   11597 	uint16_t page;
   11598 	int offset, rv;
   11599 
   11600 	/* Acquire semaphore */
   11601 	if (sc->phy.acquire(sc)) {
   11602 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11603 		return -1;
   11604 	}
   11605 
   11606 	/* Page select */
   11607 	page = reg >> GS40G_PAGE_SHIFT;
   11608 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11609 	if (rv != 0)
   11610 		goto release;
   11611 
   11612 	/* Write reg */
   11613 	offset = reg & GS40G_OFFSET_MASK;
   11614 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11615 
   11616 release:
   11617 	/* Release semaphore */
   11618 	sc->phy.release(sc);
   11619 	return rv;
   11620 }
   11621 
   11622 /*
   11623  * wm_gmii_statchg:	[mii interface function]
   11624  *
   11625  *	Callback from MII layer when media changes.
   11626  */
   11627 static void
   11628 wm_gmii_statchg(struct ifnet *ifp)
   11629 {
   11630 	struct wm_softc *sc = ifp->if_softc;
   11631 	struct mii_data *mii = &sc->sc_mii;
   11632 
   11633 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11634 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11635 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11636 
   11637 	/* Get flow control negotiation result. */
   11638 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11639 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11640 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11641 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11642 	}
   11643 
   11644 	if (sc->sc_flowflags & IFM_FLOW) {
   11645 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11646 			sc->sc_ctrl |= CTRL_TFCE;
   11647 			sc->sc_fcrtl |= FCRTL_XONE;
   11648 		}
   11649 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11650 			sc->sc_ctrl |= CTRL_RFCE;
   11651 	}
   11652 
   11653 	if (mii->mii_media_active & IFM_FDX) {
   11654 		DPRINTF(WM_DEBUG_LINK,
   11655 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11656 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11657 	} else {
   11658 		DPRINTF(WM_DEBUG_LINK,
   11659 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11660 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11661 	}
   11662 
   11663 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11664 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11665 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11666 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11667 	if (sc->sc_type == WM_T_80003) {
   11668 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11669 		case IFM_1000_T:
   11670 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11671 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11672 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11673 			break;
   11674 		default:
   11675 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11676 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11677 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   11678 			break;
   11679 		}
   11680 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11681 	}
   11682 }
   11683 
   11684 /* kumeran related (80003, ICH* and PCH*) */
   11685 
   11686 /*
   11687  * wm_kmrn_readreg:
   11688  *
   11689  *	Read a kumeran register
   11690  */
   11691 static int
   11692 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11693 {
   11694 	int rv;
   11695 
   11696 	if (sc->sc_type == WM_T_80003)
   11697 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11698 	else
   11699 		rv = sc->phy.acquire(sc);
   11700 	if (rv != 0) {
   11701 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11702 		    __func__);
   11703 		return rv;
   11704 	}
   11705 
   11706 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11707 
   11708 	if (sc->sc_type == WM_T_80003)
   11709 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11710 	else
   11711 		sc->phy.release(sc);
   11712 
   11713 	return rv;
   11714 }
   11715 
   11716 static int
   11717 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11718 {
   11719 
   11720 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11721 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11722 	    KUMCTRLSTA_REN);
   11723 	CSR_WRITE_FLUSH(sc);
   11724 	delay(2);
   11725 
   11726 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11727 
   11728 	return 0;
   11729 }
   11730 
   11731 /*
   11732  * wm_kmrn_writereg:
   11733  *
   11734  *	Write a kumeran register
   11735  */
   11736 static int
   11737 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11738 {
   11739 	int rv;
   11740 
   11741 	if (sc->sc_type == WM_T_80003)
   11742 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11743 	else
   11744 		rv = sc->phy.acquire(sc);
   11745 	if (rv != 0) {
   11746 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11747 		    __func__);
   11748 		return rv;
   11749 	}
   11750 
   11751 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11752 
   11753 	if (sc->sc_type == WM_T_80003)
   11754 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11755 	else
   11756 		sc->phy.release(sc);
   11757 
   11758 	return rv;
   11759 }
   11760 
   11761 static int
   11762 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11763 {
   11764 
   11765 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11766 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11767 
   11768 	return 0;
   11769 }
   11770 
   11771 /*
   11772  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11773  * This access method is different from IEEE MMD.
   11774  */
   11775 static int
   11776 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11777 {
   11778 	struct wm_softc *sc = device_private(dev);
   11779 	int rv;
   11780 
   11781 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11782 	if (rv != 0)
   11783 		return rv;
   11784 
   11785 	if (rd)
   11786 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11787 	else
   11788 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11789 	return rv;
   11790 }
   11791 
   11792 static int
   11793 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11794 {
   11795 
   11796 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11797 }
   11798 
   11799 static int
   11800 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11801 {
   11802 
   11803 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11804 }
   11805 
   11806 /* SGMII related */
   11807 
   11808 /*
   11809  * wm_sgmii_uses_mdio
   11810  *
   11811  * Check whether the transaction is to the internal PHY or the external
   11812  * MDIO interface. Return true if it's MDIO.
   11813  */
   11814 static bool
   11815 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11816 {
   11817 	uint32_t reg;
   11818 	bool ismdio = false;
   11819 
   11820 	switch (sc->sc_type) {
   11821 	case WM_T_82575:
   11822 	case WM_T_82576:
   11823 		reg = CSR_READ(sc, WMREG_MDIC);
   11824 		ismdio = ((reg & MDIC_DEST) != 0);
   11825 		break;
   11826 	case WM_T_82580:
   11827 	case WM_T_I350:
   11828 	case WM_T_I354:
   11829 	case WM_T_I210:
   11830 	case WM_T_I211:
   11831 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11832 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11833 		break;
   11834 	default:
   11835 		break;
   11836 	}
   11837 
   11838 	return ismdio;
   11839 }
   11840 
   11841 /*
   11842  * wm_sgmii_readreg:	[mii interface function]
   11843  *
   11844  *	Read a PHY register on the SGMII
   11845  * This could be handled by the PHY layer if we didn't have to lock the
   11846  * ressource ...
   11847  */
   11848 static int
   11849 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11850 {
   11851 	struct wm_softc *sc = device_private(dev);
   11852 	int rv;
   11853 
   11854 	if (sc->phy.acquire(sc)) {
   11855 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11856 		return -1;
   11857 	}
   11858 
   11859 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11860 
   11861 	sc->phy.release(sc);
   11862 	return rv;
   11863 }
   11864 
   11865 static int
   11866 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11867 {
   11868 	struct wm_softc *sc = device_private(dev);
   11869 	uint32_t i2ccmd;
   11870 	int i, rv = 0;
   11871 
   11872 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11873 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11874 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11875 
   11876 	/* Poll the ready bit */
   11877 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11878 		delay(50);
   11879 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11880 		if (i2ccmd & I2CCMD_READY)
   11881 			break;
   11882 	}
   11883 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11884 		device_printf(dev, "I2CCMD Read did not complete\n");
   11885 		rv = ETIMEDOUT;
   11886 	}
   11887 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11888 		if (!sc->phy.no_errprint)
   11889 			device_printf(dev, "I2CCMD Error bit set\n");
   11890 		rv = EIO;
   11891 	}
   11892 
   11893 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11894 
   11895 	return rv;
   11896 }
   11897 
   11898 /*
   11899  * wm_sgmii_writereg:	[mii interface function]
   11900  *
   11901  *	Write a PHY register on the SGMII.
   11902  * This could be handled by the PHY layer if we didn't have to lock the
   11903  * ressource ...
   11904  */
   11905 static int
   11906 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11907 {
   11908 	struct wm_softc *sc = device_private(dev);
   11909 	int rv;
   11910 
   11911 	if (sc->phy.acquire(sc) != 0) {
   11912 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11913 		return -1;
   11914 	}
   11915 
   11916 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11917 
   11918 	sc->phy.release(sc);
   11919 
   11920 	return rv;
   11921 }
   11922 
   11923 static int
   11924 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11925 {
   11926 	struct wm_softc *sc = device_private(dev);
   11927 	uint32_t i2ccmd;
   11928 	uint16_t swapdata;
   11929 	int rv = 0;
   11930 	int i;
   11931 
   11932 	/* Swap the data bytes for the I2C interface */
   11933 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11934 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11935 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11936 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11937 
   11938 	/* Poll the ready bit */
   11939 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11940 		delay(50);
   11941 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11942 		if (i2ccmd & I2CCMD_READY)
   11943 			break;
   11944 	}
   11945 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11946 		device_printf(dev, "I2CCMD Write did not complete\n");
   11947 		rv = ETIMEDOUT;
   11948 	}
   11949 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11950 		device_printf(dev, "I2CCMD Error bit set\n");
   11951 		rv = EIO;
   11952 	}
   11953 
   11954 	return rv;
   11955 }
   11956 
   11957 /* TBI related */
   11958 
   11959 static bool
   11960 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11961 {
   11962 	bool sig;
   11963 
   11964 	sig = ctrl & CTRL_SWDPIN(1);
   11965 
   11966 	/*
   11967 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11968 	 * detect a signal, 1 if they don't.
   11969 	 */
   11970 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11971 		sig = !sig;
   11972 
   11973 	return sig;
   11974 }
   11975 
   11976 /*
   11977  * wm_tbi_mediainit:
   11978  *
   11979  *	Initialize media for use on 1000BASE-X devices.
   11980  */
   11981 static void
   11982 wm_tbi_mediainit(struct wm_softc *sc)
   11983 {
   11984 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11985 	const char *sep = "";
   11986 
   11987 	if (sc->sc_type < WM_T_82543)
   11988 		sc->sc_tipg = TIPG_WM_DFLT;
   11989 	else
   11990 		sc->sc_tipg = TIPG_LG_DFLT;
   11991 
   11992 	sc->sc_tbi_serdes_anegticks = 5;
   11993 
   11994 	/* Initialize our media structures */
   11995 	sc->sc_mii.mii_ifp = ifp;
   11996 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11997 
   11998 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11999 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12000 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12001 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12002 		    sc->sc_core_lock);
   12003 	} else {
   12004 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12005 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12006 	}
   12007 
   12008 	/*
   12009 	 * SWD Pins:
   12010 	 *
   12011 	 *	0 = Link LED (output)
   12012 	 *	1 = Loss Of Signal (input)
   12013 	 */
   12014 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12015 
   12016 	/* XXX Perhaps this is only for TBI */
   12017 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12018 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12019 
   12020 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12021 		sc->sc_ctrl &= ~CTRL_LRST;
   12022 
   12023 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12024 
   12025 #define	ADD(ss, mm, dd)							\
   12026 do {									\
   12027 	aprint_normal("%s%s", sep, ss);					\
   12028 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12029 	sep = ", ";							\
   12030 } while (/*CONSTCOND*/0)
   12031 
   12032 	aprint_normal_dev(sc->sc_dev, "");
   12033 
   12034 	if (sc->sc_type == WM_T_I354) {
   12035 		uint32_t status;
   12036 
   12037 		status = CSR_READ(sc, WMREG_STATUS);
   12038 		if (((status & STATUS_2P5_SKU) != 0)
   12039 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12040 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12041 		} else
   12042 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12043 	} else if (sc->sc_type == WM_T_82545) {
   12044 		/* Only 82545 is LX (XXX except SFP) */
   12045 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12046 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12047 	} else if (sc->sc_sfptype != 0) {
   12048 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12049 		switch (sc->sc_sfptype) {
   12050 		default:
   12051 		case SFF_SFP_ETH_FLAGS_1000SX:
   12052 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12053 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12054 			break;
   12055 		case SFF_SFP_ETH_FLAGS_1000LX:
   12056 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12057 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12058 			break;
   12059 		case SFF_SFP_ETH_FLAGS_1000CX:
   12060 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12061 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12062 			break;
   12063 		case SFF_SFP_ETH_FLAGS_1000T:
   12064 			ADD("1000baseT", IFM_1000_T, 0);
   12065 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12066 			break;
   12067 		case SFF_SFP_ETH_FLAGS_100FX:
   12068 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12069 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12070 			break;
   12071 		}
   12072 	} else {
   12073 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12074 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12075 	}
   12076 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12077 	aprint_normal("\n");
   12078 
   12079 #undef ADD
   12080 
   12081 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12082 }
   12083 
   12084 /*
   12085  * wm_tbi_mediachange:	[ifmedia interface function]
   12086  *
   12087  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12088  */
   12089 static int
   12090 wm_tbi_mediachange(struct ifnet *ifp)
   12091 {
   12092 	struct wm_softc *sc = ifp->if_softc;
   12093 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12094 	uint32_t status, ctrl;
   12095 	bool signal;
   12096 	int i;
   12097 
   12098 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12099 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12100 		/* XXX need some work for >= 82571 and < 82575 */
   12101 		if (sc->sc_type < WM_T_82575)
   12102 			return 0;
   12103 	}
   12104 
   12105 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12106 	    || (sc->sc_type >= WM_T_82575))
   12107 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12108 
   12109 	sc->sc_ctrl &= ~CTRL_LRST;
   12110 	sc->sc_txcw = TXCW_ANE;
   12111 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12112 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12113 	else if (ife->ifm_media & IFM_FDX)
   12114 		sc->sc_txcw |= TXCW_FD;
   12115 	else
   12116 		sc->sc_txcw |= TXCW_HD;
   12117 
   12118 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12119 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12120 
   12121 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12122 		device_xname(sc->sc_dev), sc->sc_txcw));
   12123 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12124 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12125 	CSR_WRITE_FLUSH(sc);
   12126 	delay(1000);
   12127 
   12128 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12129 	signal = wm_tbi_havesignal(sc, ctrl);
   12130 
   12131 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12132 		signal));
   12133 
   12134 	if (signal) {
   12135 		/* Have signal; wait for the link to come up. */
   12136 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12137 			delay(10000);
   12138 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12139 				break;
   12140 		}
   12141 
   12142 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12143 			device_xname(sc->sc_dev), i));
   12144 
   12145 		status = CSR_READ(sc, WMREG_STATUS);
   12146 		DPRINTF(WM_DEBUG_LINK,
   12147 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12148 			device_xname(sc->sc_dev), status, STATUS_LU));
   12149 		if (status & STATUS_LU) {
   12150 			/* Link is up. */
   12151 			DPRINTF(WM_DEBUG_LINK,
   12152 			    ("%s: LINK: set media -> link up %s\n",
   12153 				device_xname(sc->sc_dev),
   12154 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12155 
   12156 			/*
   12157 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12158 			 * so we should update sc->sc_ctrl
   12159 			 */
   12160 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12161 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12162 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12163 			if (status & STATUS_FD)
   12164 				sc->sc_tctl |=
   12165 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12166 			else
   12167 				sc->sc_tctl |=
   12168 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12169 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12170 				sc->sc_fcrtl |= FCRTL_XONE;
   12171 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12172 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12173 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12174 			sc->sc_tbi_linkup = 1;
   12175 		} else {
   12176 			if (i == WM_LINKUP_TIMEOUT)
   12177 				wm_check_for_link(sc);
   12178 			/* Link is down. */
   12179 			DPRINTF(WM_DEBUG_LINK,
   12180 			    ("%s: LINK: set media -> link down\n",
   12181 				device_xname(sc->sc_dev)));
   12182 			sc->sc_tbi_linkup = 0;
   12183 		}
   12184 	} else {
   12185 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12186 			device_xname(sc->sc_dev)));
   12187 		sc->sc_tbi_linkup = 0;
   12188 	}
   12189 
   12190 	wm_tbi_serdes_set_linkled(sc);
   12191 
   12192 	return 0;
   12193 }
   12194 
   12195 /*
   12196  * wm_tbi_mediastatus:	[ifmedia interface function]
   12197  *
   12198  *	Get the current interface media status on a 1000BASE-X device.
   12199  */
   12200 static void
   12201 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12202 {
   12203 	struct wm_softc *sc = ifp->if_softc;
   12204 	uint32_t ctrl, status;
   12205 
   12206 	ifmr->ifm_status = IFM_AVALID;
   12207 	ifmr->ifm_active = IFM_ETHER;
   12208 
   12209 	status = CSR_READ(sc, WMREG_STATUS);
   12210 	if ((status & STATUS_LU) == 0) {
   12211 		ifmr->ifm_active |= IFM_NONE;
   12212 		return;
   12213 	}
   12214 
   12215 	ifmr->ifm_status |= IFM_ACTIVE;
   12216 	/* Only 82545 is LX */
   12217 	if (sc->sc_type == WM_T_82545)
   12218 		ifmr->ifm_active |= IFM_1000_LX;
   12219 	else
   12220 		ifmr->ifm_active |= IFM_1000_SX;
   12221 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12222 		ifmr->ifm_active |= IFM_FDX;
   12223 	else
   12224 		ifmr->ifm_active |= IFM_HDX;
   12225 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12226 	if (ctrl & CTRL_RFCE)
   12227 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12228 	if (ctrl & CTRL_TFCE)
   12229 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12230 }
   12231 
   12232 /* XXX TBI only */
   12233 static int
   12234 wm_check_for_link(struct wm_softc *sc)
   12235 {
   12236 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12237 	uint32_t rxcw;
   12238 	uint32_t ctrl;
   12239 	uint32_t status;
   12240 	bool signal;
   12241 
   12242 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12243 		device_xname(sc->sc_dev), __func__));
   12244 
   12245 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12246 		/* XXX need some work for >= 82571 */
   12247 		if (sc->sc_type >= WM_T_82571) {
   12248 			sc->sc_tbi_linkup = 1;
   12249 			return 0;
   12250 		}
   12251 	}
   12252 
   12253 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12254 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12255 	status = CSR_READ(sc, WMREG_STATUS);
   12256 	signal = wm_tbi_havesignal(sc, ctrl);
   12257 
   12258 	DPRINTF(WM_DEBUG_LINK,
   12259 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12260 		device_xname(sc->sc_dev), __func__, signal,
   12261 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12262 
   12263 	/*
   12264 	 * SWDPIN   LU RXCW
   12265 	 *	0    0	  0
   12266 	 *	0    0	  1	(should not happen)
   12267 	 *	0    1	  0	(should not happen)
   12268 	 *	0    1	  1	(should not happen)
   12269 	 *	1    0	  0	Disable autonego and force linkup
   12270 	 *	1    0	  1	got /C/ but not linkup yet
   12271 	 *	1    1	  0	(linkup)
   12272 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12273 	 *
   12274 	 */
   12275 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12276 		DPRINTF(WM_DEBUG_LINK,
   12277 		    ("%s: %s: force linkup and fullduplex\n",
   12278 			device_xname(sc->sc_dev), __func__));
   12279 		sc->sc_tbi_linkup = 0;
   12280 		/* Disable auto-negotiation in the TXCW register */
   12281 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12282 
   12283 		/*
   12284 		 * Force link-up and also force full-duplex.
   12285 		 *
   12286 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
   12287 		 * so we should update sc->sc_ctrl
   12288 		 */
   12289 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12290 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12291 	} else if (((status & STATUS_LU) != 0)
   12292 	    && ((rxcw & RXCW_C) != 0)
   12293 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12294 		sc->sc_tbi_linkup = 1;
   12295 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12296 			device_xname(sc->sc_dev),
   12297 			__func__));
   12298 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12299 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12300 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12301 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12302 			device_xname(sc->sc_dev), __func__));
   12303 	} else {
   12304 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12305 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12306 			status));
   12307 	}
   12308 
   12309 	return 0;
   12310 }
   12311 
   12312 /*
   12313  * wm_tbi_tick:
   12314  *
   12315  *	Check the link on TBI devices.
   12316  *	This function acts as mii_tick().
   12317  */
   12318 static void
   12319 wm_tbi_tick(struct wm_softc *sc)
   12320 {
   12321 	struct mii_data *mii = &sc->sc_mii;
   12322 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12323 	uint32_t status;
   12324 
   12325 	KASSERT(WM_CORE_LOCKED(sc));
   12326 
   12327 	status = CSR_READ(sc, WMREG_STATUS);
   12328 
   12329 	/* XXX is this needed? */
   12330 	(void)CSR_READ(sc, WMREG_RXCW);
   12331 	(void)CSR_READ(sc, WMREG_CTRL);
   12332 
   12333 	/* set link status */
   12334 	if ((status & STATUS_LU) == 0) {
   12335 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12336 			device_xname(sc->sc_dev)));
   12337 		sc->sc_tbi_linkup = 0;
   12338 	} else if (sc->sc_tbi_linkup == 0) {
   12339 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12340 			device_xname(sc->sc_dev),
   12341 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12342 		sc->sc_tbi_linkup = 1;
   12343 		sc->sc_tbi_serdes_ticks = 0;
   12344 	}
   12345 
   12346 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12347 		goto setled;
   12348 
   12349 	if ((status & STATUS_LU) == 0) {
   12350 		sc->sc_tbi_linkup = 0;
   12351 		/* If the timer expired, retry autonegotiation */
   12352 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12353 		    && (++sc->sc_tbi_serdes_ticks
   12354 			>= sc->sc_tbi_serdes_anegticks)) {
   12355 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12356 				device_xname(sc->sc_dev), __func__));
   12357 			sc->sc_tbi_serdes_ticks = 0;
   12358 			/*
   12359 			 * Reset the link, and let autonegotiation do
   12360 			 * its thing
   12361 			 */
   12362 			sc->sc_ctrl |= CTRL_LRST;
   12363 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12364 			CSR_WRITE_FLUSH(sc);
   12365 			delay(1000);
   12366 			sc->sc_ctrl &= ~CTRL_LRST;
   12367 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12368 			CSR_WRITE_FLUSH(sc);
   12369 			delay(1000);
   12370 			CSR_WRITE(sc, WMREG_TXCW,
   12371 			    sc->sc_txcw & ~TXCW_ANE);
   12372 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12373 		}
   12374 	}
   12375 
   12376 setled:
   12377 	wm_tbi_serdes_set_linkled(sc);
   12378 }
   12379 
   12380 /* SERDES related */
   12381 static void
   12382 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12383 {
   12384 	uint32_t reg;
   12385 
   12386 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12387 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12388 		return;
   12389 
   12390 	/* Enable PCS to turn on link */
   12391 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12392 	reg |= PCS_CFG_PCS_EN;
   12393 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12394 
   12395 	/* Power up the laser */
   12396 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12397 	reg &= ~CTRL_EXT_SWDPIN(3);
   12398 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12399 
   12400 	/* Flush the write to verify completion */
   12401 	CSR_WRITE_FLUSH(sc);
   12402 	delay(1000);
   12403 }
   12404 
   12405 static int
   12406 wm_serdes_mediachange(struct ifnet *ifp)
   12407 {
   12408 	struct wm_softc *sc = ifp->if_softc;
   12409 	bool pcs_autoneg = true; /* XXX */
   12410 	uint32_t ctrl_ext, pcs_lctl, reg;
   12411 
   12412 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12413 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12414 		return 0;
   12415 
   12416 	/* XXX Currently, this function is not called on 8257[12] */
   12417 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12418 	    || (sc->sc_type >= WM_T_82575))
   12419 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12420 
   12421 	/* Power on the sfp cage if present */
   12422 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12423 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12424 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12425 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12426 
   12427 	sc->sc_ctrl |= CTRL_SLU;
   12428 
   12429 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12430 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12431 
   12432 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12433 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12434 	case CTRL_EXT_LINK_MODE_SGMII:
   12435 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12436 		pcs_autoneg = true;
   12437 		/* Autoneg time out should be disabled for SGMII mode */
   12438 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12439 		break;
   12440 	case CTRL_EXT_LINK_MODE_1000KX:
   12441 		pcs_autoneg = false;
   12442 		/* FALLTHROUGH */
   12443 	default:
   12444 		if ((sc->sc_type == WM_T_82575)
   12445 		    || (sc->sc_type == WM_T_82576)) {
   12446 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12447 				pcs_autoneg = false;
   12448 		}
   12449 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12450 		    | CTRL_FRCFDX;
   12451 
   12452 		/* Set speed of 1000/Full if speed/duplex is forced */
   12453 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12454 	}
   12455 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12456 
   12457 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12458 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12459 
   12460 	if (pcs_autoneg) {
   12461 		/* Set PCS register for autoneg */
   12462 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12463 
   12464 		/* Disable force flow control for autoneg */
   12465 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12466 
   12467 		/* Configure flow control advertisement for autoneg */
   12468 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12469 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12470 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12471 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12472 	} else
   12473 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12474 
   12475 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12476 
   12477 	return 0;
   12478 }
   12479 
   12480 static void
   12481 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12482 {
   12483 	struct wm_softc *sc = ifp->if_softc;
   12484 	struct mii_data *mii = &sc->sc_mii;
   12485 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12486 	uint32_t pcs_adv, pcs_lpab, reg;
   12487 
   12488 	ifmr->ifm_status = IFM_AVALID;
   12489 	ifmr->ifm_active = IFM_ETHER;
   12490 
   12491 	/* Check PCS */
   12492 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12493 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12494 		ifmr->ifm_active |= IFM_NONE;
   12495 		sc->sc_tbi_linkup = 0;
   12496 		goto setled;
   12497 	}
   12498 
   12499 	sc->sc_tbi_linkup = 1;
   12500 	ifmr->ifm_status |= IFM_ACTIVE;
   12501 	if (sc->sc_type == WM_T_I354) {
   12502 		uint32_t status;
   12503 
   12504 		status = CSR_READ(sc, WMREG_STATUS);
   12505 		if (((status & STATUS_2P5_SKU) != 0)
   12506 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12507 			ifmr->ifm_active |= IFM_2500_KX;
   12508 		} else
   12509 			ifmr->ifm_active |= IFM_1000_KX;
   12510 	} else {
   12511 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12512 		case PCS_LSTS_SPEED_10:
   12513 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12514 			break;
   12515 		case PCS_LSTS_SPEED_100:
   12516 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12517 			break;
   12518 		case PCS_LSTS_SPEED_1000:
   12519 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12520 			break;
   12521 		default:
   12522 			device_printf(sc->sc_dev, "Unknown speed\n");
   12523 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12524 			break;
   12525 		}
   12526 	}
   12527 	if ((reg & PCS_LSTS_FDX) != 0)
   12528 		ifmr->ifm_active |= IFM_FDX;
   12529 	else
   12530 		ifmr->ifm_active |= IFM_HDX;
   12531 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12532 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12533 		/* Check flow */
   12534 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12535 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12536 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12537 			goto setled;
   12538 		}
   12539 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12540 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12541 		DPRINTF(WM_DEBUG_LINK,
   12542 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12543 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12544 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12545 			mii->mii_media_active |= IFM_FLOW
   12546 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12547 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12548 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12549 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12550 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12551 			mii->mii_media_active |= IFM_FLOW
   12552 			    | IFM_ETH_TXPAUSE;
   12553 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12554 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12555 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12556 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12557 			mii->mii_media_active |= IFM_FLOW
   12558 			    | IFM_ETH_RXPAUSE;
   12559 		}
   12560 	}
   12561 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12562 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12563 setled:
   12564 	wm_tbi_serdes_set_linkled(sc);
   12565 }
   12566 
   12567 /*
   12568  * wm_serdes_tick:
   12569  *
   12570  *	Check the link on serdes devices.
   12571  */
   12572 static void
   12573 wm_serdes_tick(struct wm_softc *sc)
   12574 {
   12575 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12576 	struct mii_data *mii = &sc->sc_mii;
   12577 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12578 	uint32_t reg;
   12579 
   12580 	KASSERT(WM_CORE_LOCKED(sc));
   12581 
   12582 	mii->mii_media_status = IFM_AVALID;
   12583 	mii->mii_media_active = IFM_ETHER;
   12584 
   12585 	/* Check PCS */
   12586 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12587 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12588 		mii->mii_media_status |= IFM_ACTIVE;
   12589 		sc->sc_tbi_linkup = 1;
   12590 		sc->sc_tbi_serdes_ticks = 0;
   12591 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12592 		if ((reg & PCS_LSTS_FDX) != 0)
   12593 			mii->mii_media_active |= IFM_FDX;
   12594 		else
   12595 			mii->mii_media_active |= IFM_HDX;
   12596 	} else {
   12597 		mii->mii_media_status |= IFM_NONE;
   12598 		sc->sc_tbi_linkup = 0;
   12599 		/* If the timer expired, retry autonegotiation */
   12600 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12601 		    && (++sc->sc_tbi_serdes_ticks
   12602 			>= sc->sc_tbi_serdes_anegticks)) {
   12603 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12604 				device_xname(sc->sc_dev), __func__));
   12605 			sc->sc_tbi_serdes_ticks = 0;
   12606 			/* XXX */
   12607 			wm_serdes_mediachange(ifp);
   12608 		}
   12609 	}
   12610 
   12611 	wm_tbi_serdes_set_linkled(sc);
   12612 }
   12613 
   12614 /* SFP related */
   12615 
   12616 static int
   12617 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12618 {
   12619 	uint32_t i2ccmd;
   12620 	int i;
   12621 
   12622 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12623 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12624 
   12625 	/* Poll the ready bit */
   12626 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12627 		delay(50);
   12628 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12629 		if (i2ccmd & I2CCMD_READY)
   12630 			break;
   12631 	}
   12632 	if ((i2ccmd & I2CCMD_READY) == 0)
   12633 		return -1;
   12634 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12635 		return -1;
   12636 
   12637 	*data = i2ccmd & 0x00ff;
   12638 
   12639 	return 0;
   12640 }
   12641 
   12642 static uint32_t
   12643 wm_sfp_get_media_type(struct wm_softc *sc)
   12644 {
   12645 	uint32_t ctrl_ext;
   12646 	uint8_t val = 0;
   12647 	int timeout = 3;
   12648 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12649 	int rv = -1;
   12650 
   12651 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12652 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12653 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12654 	CSR_WRITE_FLUSH(sc);
   12655 
   12656 	/* Read SFP module data */
   12657 	while (timeout) {
   12658 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12659 		if (rv == 0)
   12660 			break;
   12661 		delay(100*1000); /* XXX too big */
   12662 		timeout--;
   12663 	}
   12664 	if (rv != 0)
   12665 		goto out;
   12666 
   12667 	switch (val) {
   12668 	case SFF_SFP_ID_SFF:
   12669 		aprint_normal_dev(sc->sc_dev,
   12670 		    "Module/Connector soldered to board\n");
   12671 		break;
   12672 	case SFF_SFP_ID_SFP:
   12673 		sc->sc_flags |= WM_F_SFP;
   12674 		break;
   12675 	case SFF_SFP_ID_UNKNOWN:
   12676 		goto out;
   12677 	default:
   12678 		break;
   12679 	}
   12680 
   12681 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12682 	if (rv != 0)
   12683 		goto out;
   12684 
   12685 	sc->sc_sfptype = val;
   12686 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12687 		mediatype = WM_MEDIATYPE_SERDES;
   12688 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12689 		sc->sc_flags |= WM_F_SGMII;
   12690 		mediatype = WM_MEDIATYPE_COPPER;
   12691 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12692 		sc->sc_flags |= WM_F_SGMII;
   12693 		mediatype = WM_MEDIATYPE_SERDES;
   12694 	} else {
   12695 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12696 		    __func__, sc->sc_sfptype);
   12697 		sc->sc_sfptype = 0; /* XXX unknown */
   12698 	}
   12699 
   12700 out:
   12701 	/* Restore I2C interface setting */
   12702 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12703 
   12704 	return mediatype;
   12705 }
   12706 
   12707 /*
   12708  * NVM related.
   12709  * Microwire, SPI (w/wo EERD) and Flash.
   12710  */
   12711 
   12712 /* Both spi and uwire */
   12713 
   12714 /*
   12715  * wm_eeprom_sendbits:
   12716  *
   12717  *	Send a series of bits to the EEPROM.
   12718  */
   12719 static void
   12720 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12721 {
   12722 	uint32_t reg;
   12723 	int x;
   12724 
   12725 	reg = CSR_READ(sc, WMREG_EECD);
   12726 
   12727 	for (x = nbits; x > 0; x--) {
   12728 		if (bits & (1U << (x - 1)))
   12729 			reg |= EECD_DI;
   12730 		else
   12731 			reg &= ~EECD_DI;
   12732 		CSR_WRITE(sc, WMREG_EECD, reg);
   12733 		CSR_WRITE_FLUSH(sc);
   12734 		delay(2);
   12735 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12736 		CSR_WRITE_FLUSH(sc);
   12737 		delay(2);
   12738 		CSR_WRITE(sc, WMREG_EECD, reg);
   12739 		CSR_WRITE_FLUSH(sc);
   12740 		delay(2);
   12741 	}
   12742 }
   12743 
   12744 /*
   12745  * wm_eeprom_recvbits:
   12746  *
   12747  *	Receive a series of bits from the EEPROM.
   12748  */
   12749 static void
   12750 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12751 {
   12752 	uint32_t reg, val;
   12753 	int x;
   12754 
   12755 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12756 
   12757 	val = 0;
   12758 	for (x = nbits; x > 0; x--) {
   12759 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12760 		CSR_WRITE_FLUSH(sc);
   12761 		delay(2);
   12762 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12763 			val |= (1U << (x - 1));
   12764 		CSR_WRITE(sc, WMREG_EECD, reg);
   12765 		CSR_WRITE_FLUSH(sc);
   12766 		delay(2);
   12767 	}
   12768 	*valp = val;
   12769 }
   12770 
   12771 /* Microwire */
   12772 
   12773 /*
   12774  * wm_nvm_read_uwire:
   12775  *
   12776  *	Read a word from the EEPROM using the MicroWire protocol.
   12777  */
   12778 static int
   12779 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12780 {
   12781 	uint32_t reg, val;
   12782 	int i;
   12783 
   12784 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12785 		device_xname(sc->sc_dev), __func__));
   12786 
   12787 	if (sc->nvm.acquire(sc) != 0)
   12788 		return -1;
   12789 
   12790 	for (i = 0; i < wordcnt; i++) {
   12791 		/* Clear SK and DI. */
   12792 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12793 		CSR_WRITE(sc, WMREG_EECD, reg);
   12794 
   12795 		/*
   12796 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12797 		 * and Xen.
   12798 		 *
   12799 		 * We use this workaround only for 82540 because qemu's
   12800 		 * e1000 act as 82540.
   12801 		 */
   12802 		if (sc->sc_type == WM_T_82540) {
   12803 			reg |= EECD_SK;
   12804 			CSR_WRITE(sc, WMREG_EECD, reg);
   12805 			reg &= ~EECD_SK;
   12806 			CSR_WRITE(sc, WMREG_EECD, reg);
   12807 			CSR_WRITE_FLUSH(sc);
   12808 			delay(2);
   12809 		}
   12810 		/* XXX: end of workaround */
   12811 
   12812 		/* Set CHIP SELECT. */
   12813 		reg |= EECD_CS;
   12814 		CSR_WRITE(sc, WMREG_EECD, reg);
   12815 		CSR_WRITE_FLUSH(sc);
   12816 		delay(2);
   12817 
   12818 		/* Shift in the READ command. */
   12819 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12820 
   12821 		/* Shift in address. */
   12822 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12823 
   12824 		/* Shift out the data. */
   12825 		wm_eeprom_recvbits(sc, &val, 16);
   12826 		data[i] = val & 0xffff;
   12827 
   12828 		/* Clear CHIP SELECT. */
   12829 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12830 		CSR_WRITE(sc, WMREG_EECD, reg);
   12831 		CSR_WRITE_FLUSH(sc);
   12832 		delay(2);
   12833 	}
   12834 
   12835 	sc->nvm.release(sc);
   12836 	return 0;
   12837 }
   12838 
   12839 /* SPI */
   12840 
   12841 /*
   12842  * Set SPI and FLASH related information from the EECD register.
   12843  * For 82541 and 82547, the word size is taken from EEPROM.
   12844  */
   12845 static int
   12846 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12847 {
   12848 	int size;
   12849 	uint32_t reg;
   12850 	uint16_t data;
   12851 
   12852 	reg = CSR_READ(sc, WMREG_EECD);
   12853 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12854 
   12855 	/* Read the size of NVM from EECD by default */
   12856 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12857 	switch (sc->sc_type) {
   12858 	case WM_T_82541:
   12859 	case WM_T_82541_2:
   12860 	case WM_T_82547:
   12861 	case WM_T_82547_2:
   12862 		/* Set dummy value to access EEPROM */
   12863 		sc->sc_nvm_wordsize = 64;
   12864 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12865 			aprint_error_dev(sc->sc_dev,
   12866 			    "%s: failed to read EEPROM size\n", __func__);
   12867 		}
   12868 		reg = data;
   12869 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12870 		if (size == 0)
   12871 			size = 6; /* 64 word size */
   12872 		else
   12873 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12874 		break;
   12875 	case WM_T_80003:
   12876 	case WM_T_82571:
   12877 	case WM_T_82572:
   12878 	case WM_T_82573: /* SPI case */
   12879 	case WM_T_82574: /* SPI case */
   12880 	case WM_T_82583: /* SPI case */
   12881 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12882 		if (size > 14)
   12883 			size = 14;
   12884 		break;
   12885 	case WM_T_82575:
   12886 	case WM_T_82576:
   12887 	case WM_T_82580:
   12888 	case WM_T_I350:
   12889 	case WM_T_I354:
   12890 	case WM_T_I210:
   12891 	case WM_T_I211:
   12892 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12893 		if (size > 15)
   12894 			size = 15;
   12895 		break;
   12896 	default:
   12897 		aprint_error_dev(sc->sc_dev,
   12898 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12899 		return -1;
   12900 		break;
   12901 	}
   12902 
   12903 	sc->sc_nvm_wordsize = 1 << size;
   12904 
   12905 	return 0;
   12906 }
   12907 
   12908 /*
   12909  * wm_nvm_ready_spi:
   12910  *
   12911  *	Wait for a SPI EEPROM to be ready for commands.
   12912  */
   12913 static int
   12914 wm_nvm_ready_spi(struct wm_softc *sc)
   12915 {
   12916 	uint32_t val;
   12917 	int usec;
   12918 
   12919 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12920 		device_xname(sc->sc_dev), __func__));
   12921 
   12922 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12923 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12924 		wm_eeprom_recvbits(sc, &val, 8);
   12925 		if ((val & SPI_SR_RDY) == 0)
   12926 			break;
   12927 	}
   12928 	if (usec >= SPI_MAX_RETRIES) {
   12929 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12930 		return -1;
   12931 	}
   12932 	return 0;
   12933 }
   12934 
   12935 /*
   12936  * wm_nvm_read_spi:
   12937  *
   12938  *	Read a work from the EEPROM using the SPI protocol.
   12939  */
   12940 static int
   12941 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12942 {
   12943 	uint32_t reg, val;
   12944 	int i;
   12945 	uint8_t opc;
   12946 	int rv = 0;
   12947 
   12948 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12949 		device_xname(sc->sc_dev), __func__));
   12950 
   12951 	if (sc->nvm.acquire(sc) != 0)
   12952 		return -1;
   12953 
   12954 	/* Clear SK and CS. */
   12955 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12956 	CSR_WRITE(sc, WMREG_EECD, reg);
   12957 	CSR_WRITE_FLUSH(sc);
   12958 	delay(2);
   12959 
   12960 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12961 		goto out;
   12962 
   12963 	/* Toggle CS to flush commands. */
   12964 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12965 	CSR_WRITE_FLUSH(sc);
   12966 	delay(2);
   12967 	CSR_WRITE(sc, WMREG_EECD, reg);
   12968 	CSR_WRITE_FLUSH(sc);
   12969 	delay(2);
   12970 
   12971 	opc = SPI_OPC_READ;
   12972 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12973 		opc |= SPI_OPC_A8;
   12974 
   12975 	wm_eeprom_sendbits(sc, opc, 8);
   12976 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12977 
   12978 	for (i = 0; i < wordcnt; i++) {
   12979 		wm_eeprom_recvbits(sc, &val, 16);
   12980 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12981 	}
   12982 
   12983 	/* Raise CS and clear SK. */
   12984 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12985 	CSR_WRITE(sc, WMREG_EECD, reg);
   12986 	CSR_WRITE_FLUSH(sc);
   12987 	delay(2);
   12988 
   12989 out:
   12990 	sc->nvm.release(sc);
   12991 	return rv;
   12992 }
   12993 
   12994 /* Using with EERD */
   12995 
   12996 static int
   12997 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12998 {
   12999 	uint32_t attempts = 100000;
   13000 	uint32_t i, reg = 0;
   13001 	int32_t done = -1;
   13002 
   13003 	for (i = 0; i < attempts; i++) {
   13004 		reg = CSR_READ(sc, rw);
   13005 
   13006 		if (reg & EERD_DONE) {
   13007 			done = 0;
   13008 			break;
   13009 		}
   13010 		delay(5);
   13011 	}
   13012 
   13013 	return done;
   13014 }
   13015 
   13016 static int
   13017 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13018 {
   13019 	int i, eerd = 0;
   13020 	int rv = 0;
   13021 
   13022 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13023 		device_xname(sc->sc_dev), __func__));
   13024 
   13025 	if (sc->nvm.acquire(sc) != 0)
   13026 		return -1;
   13027 
   13028 	for (i = 0; i < wordcnt; i++) {
   13029 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13030 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13031 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13032 		if (rv != 0) {
   13033 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13034 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13035 			break;
   13036 		}
   13037 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13038 	}
   13039 
   13040 	sc->nvm.release(sc);
   13041 	return rv;
   13042 }
   13043 
   13044 /* Flash */
   13045 
   13046 static int
   13047 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13048 {
   13049 	uint32_t eecd;
   13050 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13051 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13052 	uint32_t nvm_dword = 0;
   13053 	uint8_t sig_byte = 0;
   13054 	int rv;
   13055 
   13056 	switch (sc->sc_type) {
   13057 	case WM_T_PCH_SPT:
   13058 	case WM_T_PCH_CNP:
   13059 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13060 		act_offset = ICH_NVM_SIG_WORD * 2;
   13061 
   13062 		/* Set bank to 0 in case flash read fails. */
   13063 		*bank = 0;
   13064 
   13065 		/* Check bank 0 */
   13066 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13067 		if (rv != 0)
   13068 			return rv;
   13069 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13070 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13071 			*bank = 0;
   13072 			return 0;
   13073 		}
   13074 
   13075 		/* Check bank 1 */
   13076 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   13077 		    &nvm_dword);
   13078 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13079 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13080 			*bank = 1;
   13081 			return 0;
   13082 		}
   13083 		aprint_error_dev(sc->sc_dev,
   13084 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13085 		return -1;
   13086 	case WM_T_ICH8:
   13087 	case WM_T_ICH9:
   13088 		eecd = CSR_READ(sc, WMREG_EECD);
   13089 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13090 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13091 			return 0;
   13092 		}
   13093 		/* FALLTHROUGH */
   13094 	default:
   13095 		/* Default to 0 */
   13096 		*bank = 0;
   13097 
   13098 		/* Check bank 0 */
   13099 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13100 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13101 			*bank = 0;
   13102 			return 0;
   13103 		}
   13104 
   13105 		/* Check bank 1 */
   13106 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13107 		    &sig_byte);
   13108 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13109 			*bank = 1;
   13110 			return 0;
   13111 		}
   13112 	}
   13113 
   13114 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13115 		device_xname(sc->sc_dev)));
   13116 	return -1;
   13117 }
   13118 
   13119 /******************************************************************************
   13120  * This function does initial flash setup so that a new read/write/erase cycle
   13121  * can be started.
   13122  *
   13123  * sc - The pointer to the hw structure
   13124  ****************************************************************************/
   13125 static int32_t
   13126 wm_ich8_cycle_init(struct wm_softc *sc)
   13127 {
   13128 	uint16_t hsfsts;
   13129 	int32_t error = 1;
   13130 	int32_t i     = 0;
   13131 
   13132 	if (sc->sc_type >= WM_T_PCH_SPT)
   13133 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13134 	else
   13135 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13136 
   13137 	/* May be check the Flash Des Valid bit in Hw status */
   13138 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13139 		return error;
   13140 
   13141 	/* Clear FCERR in Hw status by writing 1 */
   13142 	/* Clear DAEL in Hw status by writing a 1 */
   13143 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13144 
   13145 	if (sc->sc_type >= WM_T_PCH_SPT)
   13146 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13147 	else
   13148 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13149 
   13150 	/*
   13151 	 * Either we should have a hardware SPI cycle in progress bit to check
   13152 	 * against, in order to start a new cycle or FDONE bit should be
   13153 	 * changed in the hardware so that it is 1 after hardware reset, which
   13154 	 * can then be used as an indication whether a cycle is in progress or
   13155 	 * has been completed .. we should also have some software semaphore
   13156 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   13157 	 * threads access to those bits can be sequentiallized or a way so that
   13158 	 * 2 threads don't start the cycle at the same time
   13159 	 */
   13160 
   13161 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13162 		/*
   13163 		 * There is no cycle running at present, so we can start a
   13164 		 * cycle
   13165 		 */
   13166 
   13167 		/* Begin by setting Flash Cycle Done. */
   13168 		hsfsts |= HSFSTS_DONE;
   13169 		if (sc->sc_type >= WM_T_PCH_SPT)
   13170 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13171 			    hsfsts & 0xffffUL);
   13172 		else
   13173 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13174 		error = 0;
   13175 	} else {
   13176 		/*
   13177 		 * Otherwise poll for sometime so the current cycle has a
   13178 		 * chance to end before giving up.
   13179 		 */
   13180 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13181 			if (sc->sc_type >= WM_T_PCH_SPT)
   13182 				hsfsts = ICH8_FLASH_READ32(sc,
   13183 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13184 			else
   13185 				hsfsts = ICH8_FLASH_READ16(sc,
   13186 				    ICH_FLASH_HSFSTS);
   13187 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13188 				error = 0;
   13189 				break;
   13190 			}
   13191 			delay(1);
   13192 		}
   13193 		if (error == 0) {
   13194 			/*
   13195 			 * Successful in waiting for previous cycle to timeout,
   13196 			 * now set the Flash Cycle Done.
   13197 			 */
   13198 			hsfsts |= HSFSTS_DONE;
   13199 			if (sc->sc_type >= WM_T_PCH_SPT)
   13200 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13201 				    hsfsts & 0xffffUL);
   13202 			else
   13203 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13204 				    hsfsts);
   13205 		}
   13206 	}
   13207 	return error;
   13208 }
   13209 
   13210 /******************************************************************************
   13211  * This function starts a flash cycle and waits for its completion
   13212  *
   13213  * sc - The pointer to the hw structure
   13214  ****************************************************************************/
   13215 static int32_t
   13216 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13217 {
   13218 	uint16_t hsflctl;
   13219 	uint16_t hsfsts;
   13220 	int32_t error = 1;
   13221 	uint32_t i = 0;
   13222 
   13223 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13224 	if (sc->sc_type >= WM_T_PCH_SPT)
   13225 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13226 	else
   13227 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13228 	hsflctl |= HSFCTL_GO;
   13229 	if (sc->sc_type >= WM_T_PCH_SPT)
   13230 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13231 		    (uint32_t)hsflctl << 16);
   13232 	else
   13233 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13234 
   13235 	/* Wait till FDONE bit is set to 1 */
   13236 	do {
   13237 		if (sc->sc_type >= WM_T_PCH_SPT)
   13238 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13239 			    & 0xffffUL;
   13240 		else
   13241 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13242 		if (hsfsts & HSFSTS_DONE)
   13243 			break;
   13244 		delay(1);
   13245 		i++;
   13246 	} while (i < timeout);
   13247 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   13248 		error = 0;
   13249 
   13250 	return error;
   13251 }
   13252 
   13253 /******************************************************************************
   13254  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13255  *
   13256  * sc - The pointer to the hw structure
   13257  * index - The index of the byte or word to read.
   13258  * size - Size of data to read, 1=byte 2=word, 4=dword
   13259  * data - Pointer to the word to store the value read.
   13260  *****************************************************************************/
   13261 static int32_t
   13262 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13263     uint32_t size, uint32_t *data)
   13264 {
   13265 	uint16_t hsfsts;
   13266 	uint16_t hsflctl;
   13267 	uint32_t flash_linear_address;
   13268 	uint32_t flash_data = 0;
   13269 	int32_t error = 1;
   13270 	int32_t count = 0;
   13271 
   13272 	if (size < 1  || size > 4 || data == 0x0 ||
   13273 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13274 		return error;
   13275 
   13276 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13277 	    sc->sc_ich8_flash_base;
   13278 
   13279 	do {
   13280 		delay(1);
   13281 		/* Steps */
   13282 		error = wm_ich8_cycle_init(sc);
   13283 		if (error)
   13284 			break;
   13285 
   13286 		if (sc->sc_type >= WM_T_PCH_SPT)
   13287 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13288 			    >> 16;
   13289 		else
   13290 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13291 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   13292 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13293 		    & HSFCTL_BCOUNT_MASK;
   13294 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13295 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13296 			/*
   13297 			 * In SPT, This register is in Lan memory space, not
   13298 			 * flash. Therefore, only 32 bit access is supported.
   13299 			 */
   13300 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13301 			    (uint32_t)hsflctl << 16);
   13302 		} else
   13303 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13304 
   13305 		/*
   13306 		 * Write the last 24 bits of index into Flash Linear address
   13307 		 * field in Flash Address
   13308 		 */
   13309 		/* TODO: TBD maybe check the index against the size of flash */
   13310 
   13311 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13312 
   13313 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13314 
   13315 		/*
   13316 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   13317 		 * the whole sequence a few more times, else read in (shift in)
   13318 		 * the Flash Data0, the order is least significant byte first
   13319 		 * msb to lsb
   13320 		 */
   13321 		if (error == 0) {
   13322 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13323 			if (size == 1)
   13324 				*data = (uint8_t)(flash_data & 0x000000FF);
   13325 			else if (size == 2)
   13326 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13327 			else if (size == 4)
   13328 				*data = (uint32_t)flash_data;
   13329 			break;
   13330 		} else {
   13331 			/*
   13332 			 * If we've gotten here, then things are probably
   13333 			 * completely hosed, but if the error condition is
   13334 			 * detected, it won't hurt to give it another try...
   13335 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13336 			 */
   13337 			if (sc->sc_type >= WM_T_PCH_SPT)
   13338 				hsfsts = ICH8_FLASH_READ32(sc,
   13339 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13340 			else
   13341 				hsfsts = ICH8_FLASH_READ16(sc,
   13342 				    ICH_FLASH_HSFSTS);
   13343 
   13344 			if (hsfsts & HSFSTS_ERR) {
   13345 				/* Repeat for some time before giving up. */
   13346 				continue;
   13347 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13348 				break;
   13349 		}
   13350 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13351 
   13352 	return error;
   13353 }
   13354 
   13355 /******************************************************************************
   13356  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13357  *
   13358  * sc - pointer to wm_hw structure
   13359  * index - The index of the byte to read.
   13360  * data - Pointer to a byte to store the value read.
   13361  *****************************************************************************/
   13362 static int32_t
   13363 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13364 {
   13365 	int32_t status;
   13366 	uint32_t word = 0;
   13367 
   13368 	status = wm_read_ich8_data(sc, index, 1, &word);
   13369 	if (status == 0)
   13370 		*data = (uint8_t)word;
   13371 	else
   13372 		*data = 0;
   13373 
   13374 	return status;
   13375 }
   13376 
   13377 /******************************************************************************
   13378  * Reads a word from the NVM using the ICH8 flash access registers.
   13379  *
   13380  * sc - pointer to wm_hw structure
   13381  * index - The starting byte index of the word to read.
   13382  * data - Pointer to a word to store the value read.
   13383  *****************************************************************************/
   13384 static int32_t
   13385 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13386 {
   13387 	int32_t status;
   13388 	uint32_t word = 0;
   13389 
   13390 	status = wm_read_ich8_data(sc, index, 2, &word);
   13391 	if (status == 0)
   13392 		*data = (uint16_t)word;
   13393 	else
   13394 		*data = 0;
   13395 
   13396 	return status;
   13397 }
   13398 
   13399 /******************************************************************************
   13400  * Reads a dword from the NVM using the ICH8 flash access registers.
   13401  *
   13402  * sc - pointer to wm_hw structure
   13403  * index - The starting byte index of the word to read.
   13404  * data - Pointer to a word to store the value read.
   13405  *****************************************************************************/
   13406 static int32_t
   13407 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13408 {
   13409 	int32_t status;
   13410 
   13411 	status = wm_read_ich8_data(sc, index, 4, data);
   13412 	return status;
   13413 }
   13414 
   13415 /******************************************************************************
   13416  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13417  * register.
   13418  *
   13419  * sc - Struct containing variables accessed by shared code
   13420  * offset - offset of word in the EEPROM to read
   13421  * data - word read from the EEPROM
   13422  * words - number of words to read
   13423  *****************************************************************************/
   13424 static int
   13425 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13426 {
   13427 	int32_t	 rv = 0;
   13428 	uint32_t flash_bank = 0;
   13429 	uint32_t act_offset = 0;
   13430 	uint32_t bank_offset = 0;
   13431 	uint16_t word = 0;
   13432 	uint16_t i = 0;
   13433 
   13434 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13435 		device_xname(sc->sc_dev), __func__));
   13436 
   13437 	if (sc->nvm.acquire(sc) != 0)
   13438 		return -1;
   13439 
   13440 	/*
   13441 	 * We need to know which is the valid flash bank.  In the event
   13442 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13443 	 * managing flash_bank. So it cannot be trusted and needs
   13444 	 * to be updated with each read.
   13445 	 */
   13446 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13447 	if (rv) {
   13448 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13449 			device_xname(sc->sc_dev)));
   13450 		flash_bank = 0;
   13451 	}
   13452 
   13453 	/*
   13454 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13455 	 * size
   13456 	 */
   13457 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13458 
   13459 	for (i = 0; i < words; i++) {
   13460 		/* The NVM part needs a byte offset, hence * 2 */
   13461 		act_offset = bank_offset + ((offset + i) * 2);
   13462 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13463 		if (rv) {
   13464 			aprint_error_dev(sc->sc_dev,
   13465 			    "%s: failed to read NVM\n", __func__);
   13466 			break;
   13467 		}
   13468 		data[i] = word;
   13469 	}
   13470 
   13471 	sc->nvm.release(sc);
   13472 	return rv;
   13473 }
   13474 
   13475 /******************************************************************************
   13476  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13477  * register.
   13478  *
   13479  * sc - Struct containing variables accessed by shared code
   13480  * offset - offset of word in the EEPROM to read
   13481  * data - word read from the EEPROM
   13482  * words - number of words to read
   13483  *****************************************************************************/
   13484 static int
   13485 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13486 {
   13487 	int32_t	 rv = 0;
   13488 	uint32_t flash_bank = 0;
   13489 	uint32_t act_offset = 0;
   13490 	uint32_t bank_offset = 0;
   13491 	uint32_t dword = 0;
   13492 	uint16_t i = 0;
   13493 
   13494 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13495 		device_xname(sc->sc_dev), __func__));
   13496 
   13497 	if (sc->nvm.acquire(sc) != 0)
   13498 		return -1;
   13499 
   13500 	/*
   13501 	 * We need to know which is the valid flash bank.  In the event
   13502 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13503 	 * managing flash_bank. So it cannot be trusted and needs
   13504 	 * to be updated with each read.
   13505 	 */
   13506 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13507 	if (rv) {
   13508 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13509 			device_xname(sc->sc_dev)));
   13510 		flash_bank = 0;
   13511 	}
   13512 
   13513 	/*
   13514 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13515 	 * size
   13516 	 */
   13517 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13518 
   13519 	for (i = 0; i < words; i++) {
   13520 		/* The NVM part needs a byte offset, hence * 2 */
   13521 		act_offset = bank_offset + ((offset + i) * 2);
   13522 		/* but we must read dword aligned, so mask ... */
   13523 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13524 		if (rv) {
   13525 			aprint_error_dev(sc->sc_dev,
   13526 			    "%s: failed to read NVM\n", __func__);
   13527 			break;
   13528 		}
   13529 		/* ... and pick out low or high word */
   13530 		if ((act_offset & 0x2) == 0)
   13531 			data[i] = (uint16_t)(dword & 0xFFFF);
   13532 		else
   13533 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13534 	}
   13535 
   13536 	sc->nvm.release(sc);
   13537 	return rv;
   13538 }
   13539 
   13540 /* iNVM */
   13541 
   13542 static int
   13543 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13544 {
   13545 	int32_t	 rv = 0;
   13546 	uint32_t invm_dword;
   13547 	uint16_t i;
   13548 	uint8_t record_type, word_address;
   13549 
   13550 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13551 		device_xname(sc->sc_dev), __func__));
   13552 
   13553 	for (i = 0; i < INVM_SIZE; i++) {
   13554 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13555 		/* Get record type */
   13556 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13557 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13558 			break;
   13559 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13560 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13561 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13562 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13563 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13564 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13565 			if (word_address == address) {
   13566 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13567 				rv = 0;
   13568 				break;
   13569 			}
   13570 		}
   13571 	}
   13572 
   13573 	return rv;
   13574 }
   13575 
   13576 static int
   13577 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13578 {
   13579 	int rv = 0;
   13580 	int i;
   13581 
   13582 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13583 		device_xname(sc->sc_dev), __func__));
   13584 
   13585 	if (sc->nvm.acquire(sc) != 0)
   13586 		return -1;
   13587 
   13588 	for (i = 0; i < words; i++) {
   13589 		switch (offset + i) {
   13590 		case NVM_OFF_MACADDR:
   13591 		case NVM_OFF_MACADDR1:
   13592 		case NVM_OFF_MACADDR2:
   13593 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13594 			if (rv != 0) {
   13595 				data[i] = 0xffff;
   13596 				rv = -1;
   13597 			}
   13598 			break;
   13599 		case NVM_OFF_CFG2:
   13600 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13601 			if (rv != 0) {
   13602 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13603 				rv = 0;
   13604 			}
   13605 			break;
   13606 		case NVM_OFF_CFG4:
   13607 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13608 			if (rv != 0) {
   13609 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13610 				rv = 0;
   13611 			}
   13612 			break;
   13613 		case NVM_OFF_LED_1_CFG:
   13614 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13615 			if (rv != 0) {
   13616 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13617 				rv = 0;
   13618 			}
   13619 			break;
   13620 		case NVM_OFF_LED_0_2_CFG:
   13621 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13622 			if (rv != 0) {
   13623 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13624 				rv = 0;
   13625 			}
   13626 			break;
   13627 		case NVM_OFF_ID_LED_SETTINGS:
   13628 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13629 			if (rv != 0) {
   13630 				*data = ID_LED_RESERVED_FFFF;
   13631 				rv = 0;
   13632 			}
   13633 			break;
   13634 		default:
   13635 			DPRINTF(WM_DEBUG_NVM,
   13636 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13637 			*data = NVM_RESERVED_WORD;
   13638 			break;
   13639 		}
   13640 	}
   13641 
   13642 	sc->nvm.release(sc);
   13643 	return rv;
   13644 }
   13645 
   13646 /* Lock, detecting NVM type, validate checksum, version and read */
   13647 
   13648 static int
   13649 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13650 {
   13651 	uint32_t eecd = 0;
   13652 
   13653 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13654 	    || sc->sc_type == WM_T_82583) {
   13655 		eecd = CSR_READ(sc, WMREG_EECD);
   13656 
   13657 		/* Isolate bits 15 & 16 */
   13658 		eecd = ((eecd >> 15) & 0x03);
   13659 
   13660 		/* If both bits are set, device is Flash type */
   13661 		if (eecd == 0x03)
   13662 			return 0;
   13663 	}
   13664 	return 1;
   13665 }
   13666 
   13667 static int
   13668 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13669 {
   13670 	uint32_t eec;
   13671 
   13672 	eec = CSR_READ(sc, WMREG_EEC);
   13673 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13674 		return 1;
   13675 
   13676 	return 0;
   13677 }
   13678 
   13679 /*
   13680  * wm_nvm_validate_checksum
   13681  *
   13682  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13683  */
   13684 static int
   13685 wm_nvm_validate_checksum(struct wm_softc *sc)
   13686 {
   13687 	uint16_t checksum;
   13688 	uint16_t eeprom_data;
   13689 #ifdef WM_DEBUG
   13690 	uint16_t csum_wordaddr, valid_checksum;
   13691 #endif
   13692 	int i;
   13693 
   13694 	checksum = 0;
   13695 
   13696 	/* Don't check for I211 */
   13697 	if (sc->sc_type == WM_T_I211)
   13698 		return 0;
   13699 
   13700 #ifdef WM_DEBUG
   13701 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13702 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13703 		csum_wordaddr = NVM_OFF_COMPAT;
   13704 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13705 	} else {
   13706 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13707 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13708 	}
   13709 
   13710 	/* Dump EEPROM image for debug */
   13711 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13712 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13713 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13714 		/* XXX PCH_SPT? */
   13715 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13716 		if ((eeprom_data & valid_checksum) == 0)
   13717 			DPRINTF(WM_DEBUG_NVM,
   13718 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13719 				device_xname(sc->sc_dev), eeprom_data,
   13720 				    valid_checksum));
   13721 	}
   13722 
   13723 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13724 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13725 		for (i = 0; i < NVM_SIZE; i++) {
   13726 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13727 				printf("XXXX ");
   13728 			else
   13729 				printf("%04hx ", eeprom_data);
   13730 			if (i % 8 == 7)
   13731 				printf("\n");
   13732 		}
   13733 	}
   13734 
   13735 #endif /* WM_DEBUG */
   13736 
   13737 	for (i = 0; i < NVM_SIZE; i++) {
   13738 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13739 			return 1;
   13740 		checksum += eeprom_data;
   13741 	}
   13742 
   13743 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13744 #ifdef WM_DEBUG
   13745 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13746 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13747 #endif
   13748 	}
   13749 
   13750 	return 0;
   13751 }
   13752 
   13753 static void
   13754 wm_nvm_version_invm(struct wm_softc *sc)
   13755 {
   13756 	uint32_t dword;
   13757 
   13758 	/*
   13759 	 * Linux's code to decode version is very strange, so we don't
   13760 	 * obey that algorithm and just use word 61 as the document.
   13761 	 * Perhaps it's not perfect though...
   13762 	 *
   13763 	 * Example:
   13764 	 *
   13765 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13766 	 */
   13767 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13768 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13769 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13770 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13771 }
   13772 
   13773 static void
   13774 wm_nvm_version(struct wm_softc *sc)
   13775 {
   13776 	uint16_t major, minor, build, patch;
   13777 	uint16_t uid0, uid1;
   13778 	uint16_t nvm_data;
   13779 	uint16_t off;
   13780 	bool check_version = false;
   13781 	bool check_optionrom = false;
   13782 	bool have_build = false;
   13783 	bool have_uid = true;
   13784 
   13785 	/*
   13786 	 * Version format:
   13787 	 *
   13788 	 * XYYZ
   13789 	 * X0YZ
   13790 	 * X0YY
   13791 	 *
   13792 	 * Example:
   13793 	 *
   13794 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13795 	 *	82571	0x50a6	5.10.6?
   13796 	 *	82572	0x506a	5.6.10?
   13797 	 *	82572EI	0x5069	5.6.9?
   13798 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13799 	 *		0x2013	2.1.3?
   13800 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13801 	 * ICH8+82567	0x0040	0.4.0?
   13802 	 * ICH9+82566	0x1040	1.4.0?
   13803 	 *ICH10+82567	0x0043	0.4.3?
   13804 	 *  PCH+82577	0x00c1	0.12.1?
   13805 	 * PCH2+82579	0x00d3	0.13.3?
   13806 	 *		0x00d4	0.13.4?
   13807 	 *  LPT+I218	0x0023	0.2.3?
   13808 	 *  SPT+I219	0x0084	0.8.4?
   13809 	 *  CNP+I219	0x0054	0.5.4?
   13810 	 */
   13811 
   13812 	/*
   13813 	 * XXX
   13814 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   13815 	 * I've never seen on real 82574 hardware with such small SPI ROM.
   13816 	 */
   13817 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13818 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13819 		have_uid = false;
   13820 
   13821 	switch (sc->sc_type) {
   13822 	case WM_T_82571:
   13823 	case WM_T_82572:
   13824 	case WM_T_82574:
   13825 	case WM_T_82583:
   13826 		check_version = true;
   13827 		check_optionrom = true;
   13828 		have_build = true;
   13829 		break;
   13830 	case WM_T_ICH8:
   13831 	case WM_T_ICH9:
   13832 	case WM_T_ICH10:
   13833 	case WM_T_PCH:
   13834 	case WM_T_PCH2:
   13835 	case WM_T_PCH_LPT:
   13836 	case WM_T_PCH_SPT:
   13837 	case WM_T_PCH_CNP:
   13838 		check_version = true;
   13839 		have_build = true;
   13840 		have_uid = false;
   13841 		break;
   13842 	case WM_T_82575:
   13843 	case WM_T_82576:
   13844 	case WM_T_82580:
   13845 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13846 			check_version = true;
   13847 		break;
   13848 	case WM_T_I211:
   13849 		wm_nvm_version_invm(sc);
   13850 		have_uid = false;
   13851 		goto printver;
   13852 	case WM_T_I210:
   13853 		if (!wm_nvm_flash_presence_i210(sc)) {
   13854 			wm_nvm_version_invm(sc);
   13855 			have_uid = false;
   13856 			goto printver;
   13857 		}
   13858 		/* FALLTHROUGH */
   13859 	case WM_T_I350:
   13860 	case WM_T_I354:
   13861 		check_version = true;
   13862 		check_optionrom = true;
   13863 		break;
   13864 	default:
   13865 		return;
   13866 	}
   13867 	if (check_version
   13868 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13869 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13870 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13871 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13872 			build = nvm_data & NVM_BUILD_MASK;
   13873 			have_build = true;
   13874 		} else
   13875 			minor = nvm_data & 0x00ff;
   13876 
   13877 		/* Decimal */
   13878 		minor = (minor / 16) * 10 + (minor % 16);
   13879 		sc->sc_nvm_ver_major = major;
   13880 		sc->sc_nvm_ver_minor = minor;
   13881 
   13882 printver:
   13883 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13884 		    sc->sc_nvm_ver_minor);
   13885 		if (have_build) {
   13886 			sc->sc_nvm_ver_build = build;
   13887 			aprint_verbose(".%d", build);
   13888 		}
   13889 	}
   13890 
   13891 	/* Assume the Option ROM area is at avove NVM_SIZE */
   13892 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13893 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13894 		/* Option ROM Version */
   13895 		if ((off != 0x0000) && (off != 0xffff)) {
   13896 			int rv;
   13897 
   13898 			off += NVM_COMBO_VER_OFF;
   13899 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13900 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13901 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13902 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13903 				/* 16bits */
   13904 				major = uid0 >> 8;
   13905 				build = (uid0 << 8) | (uid1 >> 8);
   13906 				patch = uid1 & 0x00ff;
   13907 				aprint_verbose(", option ROM Version %d.%d.%d",
   13908 				    major, build, patch);
   13909 			}
   13910 		}
   13911 	}
   13912 
   13913 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13914 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13915 }
   13916 
   13917 /*
   13918  * wm_nvm_read:
   13919  *
   13920  *	Read data from the serial EEPROM.
   13921  */
   13922 static int
   13923 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13924 {
   13925 	int rv;
   13926 
   13927 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13928 		device_xname(sc->sc_dev), __func__));
   13929 
   13930 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13931 		return -1;
   13932 
   13933 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13934 
   13935 	return rv;
   13936 }
   13937 
   13938 /*
   13939  * Hardware semaphores.
   13940  * Very complexed...
   13941  */
   13942 
   13943 static int
   13944 wm_get_null(struct wm_softc *sc)
   13945 {
   13946 
   13947 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13948 		device_xname(sc->sc_dev), __func__));
   13949 	return 0;
   13950 }
   13951 
   13952 static void
   13953 wm_put_null(struct wm_softc *sc)
   13954 {
   13955 
   13956 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13957 		device_xname(sc->sc_dev), __func__));
   13958 	return;
   13959 }
   13960 
   13961 static int
   13962 wm_get_eecd(struct wm_softc *sc)
   13963 {
   13964 	uint32_t reg;
   13965 	int x;
   13966 
   13967 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13968 		device_xname(sc->sc_dev), __func__));
   13969 
   13970 	reg = CSR_READ(sc, WMREG_EECD);
   13971 
   13972 	/* Request EEPROM access. */
   13973 	reg |= EECD_EE_REQ;
   13974 	CSR_WRITE(sc, WMREG_EECD, reg);
   13975 
   13976 	/* ..and wait for it to be granted. */
   13977 	for (x = 0; x < 1000; x++) {
   13978 		reg = CSR_READ(sc, WMREG_EECD);
   13979 		if (reg & EECD_EE_GNT)
   13980 			break;
   13981 		delay(5);
   13982 	}
   13983 	if ((reg & EECD_EE_GNT) == 0) {
   13984 		aprint_error_dev(sc->sc_dev,
   13985 		    "could not acquire EEPROM GNT\n");
   13986 		reg &= ~EECD_EE_REQ;
   13987 		CSR_WRITE(sc, WMREG_EECD, reg);
   13988 		return -1;
   13989 	}
   13990 
   13991 	return 0;
   13992 }
   13993 
   13994 static void
   13995 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13996 {
   13997 
   13998 	*eecd |= EECD_SK;
   13999 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14000 	CSR_WRITE_FLUSH(sc);
   14001 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14002 		delay(1);
   14003 	else
   14004 		delay(50);
   14005 }
   14006 
   14007 static void
   14008 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14009 {
   14010 
   14011 	*eecd &= ~EECD_SK;
   14012 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14013 	CSR_WRITE_FLUSH(sc);
   14014 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14015 		delay(1);
   14016 	else
   14017 		delay(50);
   14018 }
   14019 
   14020 static void
   14021 wm_put_eecd(struct wm_softc *sc)
   14022 {
   14023 	uint32_t reg;
   14024 
   14025 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14026 		device_xname(sc->sc_dev), __func__));
   14027 
   14028 	/* Stop nvm */
   14029 	reg = CSR_READ(sc, WMREG_EECD);
   14030 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14031 		/* Pull CS high */
   14032 		reg |= EECD_CS;
   14033 		wm_nvm_eec_clock_lower(sc, &reg);
   14034 	} else {
   14035 		/* CS on Microwire is active-high */
   14036 		reg &= ~(EECD_CS | EECD_DI);
   14037 		CSR_WRITE(sc, WMREG_EECD, reg);
   14038 		wm_nvm_eec_clock_raise(sc, &reg);
   14039 		wm_nvm_eec_clock_lower(sc, &reg);
   14040 	}
   14041 
   14042 	reg = CSR_READ(sc, WMREG_EECD);
   14043 	reg &= ~EECD_EE_REQ;
   14044 	CSR_WRITE(sc, WMREG_EECD, reg);
   14045 
   14046 	return;
   14047 }
   14048 
   14049 /*
   14050  * Get hardware semaphore.
   14051  * Same as e1000_get_hw_semaphore_generic()
   14052  */
   14053 static int
   14054 wm_get_swsm_semaphore(struct wm_softc *sc)
   14055 {
   14056 	int32_t timeout;
   14057 	uint32_t swsm;
   14058 
   14059 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14060 		device_xname(sc->sc_dev), __func__));
   14061 	KASSERT(sc->sc_nvm_wordsize > 0);
   14062 
   14063 retry:
   14064 	/* Get the SW semaphore. */
   14065 	timeout = sc->sc_nvm_wordsize + 1;
   14066 	while (timeout) {
   14067 		swsm = CSR_READ(sc, WMREG_SWSM);
   14068 
   14069 		if ((swsm & SWSM_SMBI) == 0)
   14070 			break;
   14071 
   14072 		delay(50);
   14073 		timeout--;
   14074 	}
   14075 
   14076 	if (timeout == 0) {
   14077 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14078 			/*
   14079 			 * In rare circumstances, the SW semaphore may already
   14080 			 * be held unintentionally. Clear the semaphore once
   14081 			 * before giving up.
   14082 			 */
   14083 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14084 			wm_put_swsm_semaphore(sc);
   14085 			goto retry;
   14086 		}
   14087 		aprint_error_dev(sc->sc_dev,
   14088 		    "could not acquire SWSM SMBI\n");
   14089 		return 1;
   14090 	}
   14091 
   14092 	/* Get the FW semaphore. */
   14093 	timeout = sc->sc_nvm_wordsize + 1;
   14094 	while (timeout) {
   14095 		swsm = CSR_READ(sc, WMREG_SWSM);
   14096 		swsm |= SWSM_SWESMBI;
   14097 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14098 		/* If we managed to set the bit we got the semaphore. */
   14099 		swsm = CSR_READ(sc, WMREG_SWSM);
   14100 		if (swsm & SWSM_SWESMBI)
   14101 			break;
   14102 
   14103 		delay(50);
   14104 		timeout--;
   14105 	}
   14106 
   14107 	if (timeout == 0) {
   14108 		aprint_error_dev(sc->sc_dev,
   14109 		    "could not acquire SWSM SWESMBI\n");
   14110 		/* Release semaphores */
   14111 		wm_put_swsm_semaphore(sc);
   14112 		return 1;
   14113 	}
   14114 	return 0;
   14115 }
   14116 
   14117 /*
   14118  * Put hardware semaphore.
   14119  * Same as e1000_put_hw_semaphore_generic()
   14120  */
   14121 static void
   14122 wm_put_swsm_semaphore(struct wm_softc *sc)
   14123 {
   14124 	uint32_t swsm;
   14125 
   14126 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14127 		device_xname(sc->sc_dev), __func__));
   14128 
   14129 	swsm = CSR_READ(sc, WMREG_SWSM);
   14130 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14131 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14132 }
   14133 
   14134 /*
   14135  * Get SW/FW semaphore.
   14136  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14137  */
   14138 static int
   14139 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14140 {
   14141 	uint32_t swfw_sync;
   14142 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14143 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14144 	int timeout;
   14145 
   14146 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14147 		device_xname(sc->sc_dev), __func__));
   14148 
   14149 	if (sc->sc_type == WM_T_80003)
   14150 		timeout = 50;
   14151 	else
   14152 		timeout = 200;
   14153 
   14154 	while (timeout) {
   14155 		if (wm_get_swsm_semaphore(sc)) {
   14156 			aprint_error_dev(sc->sc_dev,
   14157 			    "%s: failed to get semaphore\n",
   14158 			    __func__);
   14159 			return 1;
   14160 		}
   14161 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14162 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14163 			swfw_sync |= swmask;
   14164 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14165 			wm_put_swsm_semaphore(sc);
   14166 			return 0;
   14167 		}
   14168 		wm_put_swsm_semaphore(sc);
   14169 		delay(5000);
   14170 		timeout--;
   14171 	}
   14172 	device_printf(sc->sc_dev,
   14173 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14174 	    mask, swfw_sync);
   14175 	return 1;
   14176 }
   14177 
   14178 static void
   14179 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14180 {
   14181 	uint32_t swfw_sync;
   14182 
   14183 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14184 		device_xname(sc->sc_dev), __func__));
   14185 
   14186 	while (wm_get_swsm_semaphore(sc) != 0)
   14187 		continue;
   14188 
   14189 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14190 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14191 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14192 
   14193 	wm_put_swsm_semaphore(sc);
   14194 }
   14195 
   14196 static int
   14197 wm_get_nvm_80003(struct wm_softc *sc)
   14198 {
   14199 	int rv;
   14200 
   14201 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14202 		device_xname(sc->sc_dev), __func__));
   14203 
   14204 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14205 		aprint_error_dev(sc->sc_dev,
   14206 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14207 		return rv;
   14208 	}
   14209 
   14210 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14211 	    && (rv = wm_get_eecd(sc)) != 0) {
   14212 		aprint_error_dev(sc->sc_dev,
   14213 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14214 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14215 		return rv;
   14216 	}
   14217 
   14218 	return 0;
   14219 }
   14220 
   14221 static void
   14222 wm_put_nvm_80003(struct wm_softc *sc)
   14223 {
   14224 
   14225 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14226 		device_xname(sc->sc_dev), __func__));
   14227 
   14228 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14229 		wm_put_eecd(sc);
   14230 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14231 }
   14232 
   14233 static int
   14234 wm_get_nvm_82571(struct wm_softc *sc)
   14235 {
   14236 	int rv;
   14237 
   14238 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14239 		device_xname(sc->sc_dev), __func__));
   14240 
   14241 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14242 		return rv;
   14243 
   14244 	switch (sc->sc_type) {
   14245 	case WM_T_82573:
   14246 		break;
   14247 	default:
   14248 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14249 			rv = wm_get_eecd(sc);
   14250 		break;
   14251 	}
   14252 
   14253 	if (rv != 0) {
   14254 		aprint_error_dev(sc->sc_dev,
   14255 		    "%s: failed to get semaphore\n",
   14256 		    __func__);
   14257 		wm_put_swsm_semaphore(sc);
   14258 	}
   14259 
   14260 	return rv;
   14261 }
   14262 
   14263 static void
   14264 wm_put_nvm_82571(struct wm_softc *sc)
   14265 {
   14266 
   14267 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14268 		device_xname(sc->sc_dev), __func__));
   14269 
   14270 	switch (sc->sc_type) {
   14271 	case WM_T_82573:
   14272 		break;
   14273 	default:
   14274 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14275 			wm_put_eecd(sc);
   14276 		break;
   14277 	}
   14278 
   14279 	wm_put_swsm_semaphore(sc);
   14280 }
   14281 
   14282 static int
   14283 wm_get_phy_82575(struct wm_softc *sc)
   14284 {
   14285 
   14286 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14287 		device_xname(sc->sc_dev), __func__));
   14288 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14289 }
   14290 
   14291 static void
   14292 wm_put_phy_82575(struct wm_softc *sc)
   14293 {
   14294 
   14295 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14296 		device_xname(sc->sc_dev), __func__));
   14297 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14298 }
   14299 
   14300 static int
   14301 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14302 {
   14303 	uint32_t ext_ctrl;
   14304 	int timeout = 200;
   14305 
   14306 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14307 		device_xname(sc->sc_dev), __func__));
   14308 
   14309 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14310 	for (timeout = 0; timeout < 200; timeout++) {
   14311 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14312 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14313 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14314 
   14315 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14316 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14317 			return 0;
   14318 		delay(5000);
   14319 	}
   14320 	device_printf(sc->sc_dev,
   14321 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14322 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14323 	return 1;
   14324 }
   14325 
   14326 static void
   14327 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14328 {
   14329 	uint32_t ext_ctrl;
   14330 
   14331 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14332 		device_xname(sc->sc_dev), __func__));
   14333 
   14334 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14335 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14336 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14337 
   14338 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14339 }
   14340 
   14341 static int
   14342 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14343 {
   14344 	uint32_t ext_ctrl;
   14345 	int timeout;
   14346 
   14347 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14348 		device_xname(sc->sc_dev), __func__));
   14349 	mutex_enter(sc->sc_ich_phymtx);
   14350 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14351 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14352 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14353 			break;
   14354 		delay(1000);
   14355 	}
   14356 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14357 		device_printf(sc->sc_dev,
   14358 		    "SW has already locked the resource\n");
   14359 		goto out;
   14360 	}
   14361 
   14362 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14363 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14364 	for (timeout = 0; timeout < 1000; timeout++) {
   14365 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14366 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14367 			break;
   14368 		delay(1000);
   14369 	}
   14370 	if (timeout >= 1000) {
   14371 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14372 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14373 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14374 		goto out;
   14375 	}
   14376 	return 0;
   14377 
   14378 out:
   14379 	mutex_exit(sc->sc_ich_phymtx);
   14380 	return 1;
   14381 }
   14382 
   14383 static void
   14384 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14385 {
   14386 	uint32_t ext_ctrl;
   14387 
   14388 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14389 		device_xname(sc->sc_dev), __func__));
   14390 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14391 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14392 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14393 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14394 	} else {
   14395 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14396 	}
   14397 
   14398 	mutex_exit(sc->sc_ich_phymtx);
   14399 }
   14400 
   14401 static int
   14402 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14403 {
   14404 
   14405 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14406 		device_xname(sc->sc_dev), __func__));
   14407 	mutex_enter(sc->sc_ich_nvmmtx);
   14408 
   14409 	return 0;
   14410 }
   14411 
   14412 static void
   14413 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14414 {
   14415 
   14416 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14417 		device_xname(sc->sc_dev), __func__));
   14418 	mutex_exit(sc->sc_ich_nvmmtx);
   14419 }
   14420 
   14421 static int
   14422 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14423 {
   14424 	int i = 0;
   14425 	uint32_t reg;
   14426 
   14427 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14428 		device_xname(sc->sc_dev), __func__));
   14429 
   14430 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14431 	do {
   14432 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14433 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14434 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14435 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14436 			break;
   14437 		delay(2*1000);
   14438 		i++;
   14439 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14440 
   14441 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14442 		wm_put_hw_semaphore_82573(sc);
   14443 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14444 		    device_xname(sc->sc_dev));
   14445 		return -1;
   14446 	}
   14447 
   14448 	return 0;
   14449 }
   14450 
   14451 static void
   14452 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14453 {
   14454 	uint32_t reg;
   14455 
   14456 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14457 		device_xname(sc->sc_dev), __func__));
   14458 
   14459 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14460 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14461 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14462 }
   14463 
   14464 /*
   14465  * Management mode and power management related subroutines.
   14466  * BMC, AMT, suspend/resume and EEE.
   14467  */
   14468 
   14469 #ifdef WM_WOL
   14470 static int
   14471 wm_check_mng_mode(struct wm_softc *sc)
   14472 {
   14473 	int rv;
   14474 
   14475 	switch (sc->sc_type) {
   14476 	case WM_T_ICH8:
   14477 	case WM_T_ICH9:
   14478 	case WM_T_ICH10:
   14479 	case WM_T_PCH:
   14480 	case WM_T_PCH2:
   14481 	case WM_T_PCH_LPT:
   14482 	case WM_T_PCH_SPT:
   14483 	case WM_T_PCH_CNP:
   14484 		rv = wm_check_mng_mode_ich8lan(sc);
   14485 		break;
   14486 	case WM_T_82574:
   14487 	case WM_T_82583:
   14488 		rv = wm_check_mng_mode_82574(sc);
   14489 		break;
   14490 	case WM_T_82571:
   14491 	case WM_T_82572:
   14492 	case WM_T_82573:
   14493 	case WM_T_80003:
   14494 		rv = wm_check_mng_mode_generic(sc);
   14495 		break;
   14496 	default:
   14497 		/* Noting to do */
   14498 		rv = 0;
   14499 		break;
   14500 	}
   14501 
   14502 	return rv;
   14503 }
   14504 
   14505 static int
   14506 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14507 {
   14508 	uint32_t fwsm;
   14509 
   14510 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14511 
   14512 	if (((fwsm & FWSM_FW_VALID) != 0)
   14513 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14514 		return 1;
   14515 
   14516 	return 0;
   14517 }
   14518 
   14519 static int
   14520 wm_check_mng_mode_82574(struct wm_softc *sc)
   14521 {
   14522 	uint16_t data;
   14523 
   14524 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14525 
   14526 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14527 		return 1;
   14528 
   14529 	return 0;
   14530 }
   14531 
   14532 static int
   14533 wm_check_mng_mode_generic(struct wm_softc *sc)
   14534 {
   14535 	uint32_t fwsm;
   14536 
   14537 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14538 
   14539 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14540 		return 1;
   14541 
   14542 	return 0;
   14543 }
   14544 #endif /* WM_WOL */
   14545 
   14546 static int
   14547 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14548 {
   14549 	uint32_t manc, fwsm, factps;
   14550 
   14551 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14552 		return 0;
   14553 
   14554 	manc = CSR_READ(sc, WMREG_MANC);
   14555 
   14556 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14557 		device_xname(sc->sc_dev), manc));
   14558 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14559 		return 0;
   14560 
   14561 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14562 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14563 		factps = CSR_READ(sc, WMREG_FACTPS);
   14564 		if (((factps & FACTPS_MNGCG) == 0)
   14565 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14566 			return 1;
   14567 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14568 		uint16_t data;
   14569 
   14570 		factps = CSR_READ(sc, WMREG_FACTPS);
   14571 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14572 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14573 			device_xname(sc->sc_dev), factps, data));
   14574 		if (((factps & FACTPS_MNGCG) == 0)
   14575 		    && ((data & NVM_CFG2_MNGM_MASK)
   14576 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14577 			return 1;
   14578 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14579 	    && ((manc & MANC_ASF_EN) == 0))
   14580 		return 1;
   14581 
   14582 	return 0;
   14583 }
   14584 
   14585 static bool
   14586 wm_phy_resetisblocked(struct wm_softc *sc)
   14587 {
   14588 	bool blocked = false;
   14589 	uint32_t reg;
   14590 	int i = 0;
   14591 
   14592 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14593 		device_xname(sc->sc_dev), __func__));
   14594 
   14595 	switch (sc->sc_type) {
   14596 	case WM_T_ICH8:
   14597 	case WM_T_ICH9:
   14598 	case WM_T_ICH10:
   14599 	case WM_T_PCH:
   14600 	case WM_T_PCH2:
   14601 	case WM_T_PCH_LPT:
   14602 	case WM_T_PCH_SPT:
   14603 	case WM_T_PCH_CNP:
   14604 		do {
   14605 			reg = CSR_READ(sc, WMREG_FWSM);
   14606 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14607 				blocked = true;
   14608 				delay(10*1000);
   14609 				continue;
   14610 			}
   14611 			blocked = false;
   14612 		} while (blocked && (i++ < 30));
   14613 		return blocked;
   14614 		break;
   14615 	case WM_T_82571:
   14616 	case WM_T_82572:
   14617 	case WM_T_82573:
   14618 	case WM_T_82574:
   14619 	case WM_T_82583:
   14620 	case WM_T_80003:
   14621 		reg = CSR_READ(sc, WMREG_MANC);
   14622 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14623 			return true;
   14624 		else
   14625 			return false;
   14626 		break;
   14627 	default:
   14628 		/* No problem */
   14629 		break;
   14630 	}
   14631 
   14632 	return false;
   14633 }
   14634 
   14635 static void
   14636 wm_get_hw_control(struct wm_softc *sc)
   14637 {
   14638 	uint32_t reg;
   14639 
   14640 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14641 		device_xname(sc->sc_dev), __func__));
   14642 
   14643 	if (sc->sc_type == WM_T_82573) {
   14644 		reg = CSR_READ(sc, WMREG_SWSM);
   14645 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14646 	} else if (sc->sc_type >= WM_T_82571) {
   14647 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14648 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14649 	}
   14650 }
   14651 
   14652 static void
   14653 wm_release_hw_control(struct wm_softc *sc)
   14654 {
   14655 	uint32_t reg;
   14656 
   14657 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14658 		device_xname(sc->sc_dev), __func__));
   14659 
   14660 	if (sc->sc_type == WM_T_82573) {
   14661 		reg = CSR_READ(sc, WMREG_SWSM);
   14662 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14663 	} else if (sc->sc_type >= WM_T_82571) {
   14664 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14665 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14666 	}
   14667 }
   14668 
   14669 static void
   14670 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14671 {
   14672 	uint32_t reg;
   14673 
   14674 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14675 		device_xname(sc->sc_dev), __func__));
   14676 
   14677 	if (sc->sc_type < WM_T_PCH2)
   14678 		return;
   14679 
   14680 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14681 
   14682 	if (gate)
   14683 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14684 	else
   14685 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14686 
   14687 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14688 }
   14689 
   14690 static int
   14691 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14692 {
   14693 	uint32_t fwsm, reg;
   14694 	int rv = 0;
   14695 
   14696 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14697 		device_xname(sc->sc_dev), __func__));
   14698 
   14699 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14700 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14701 
   14702 	/* Disable ULP */
   14703 	wm_ulp_disable(sc);
   14704 
   14705 	/* Acquire PHY semaphore */
   14706 	rv = sc->phy.acquire(sc);
   14707 	if (rv != 0) {
   14708 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14709 		device_xname(sc->sc_dev), __func__));
   14710 		return -1;
   14711 	}
   14712 
   14713 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14714 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14715 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14716 	 */
   14717 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14718 	switch (sc->sc_type) {
   14719 	case WM_T_PCH_LPT:
   14720 	case WM_T_PCH_SPT:
   14721 	case WM_T_PCH_CNP:
   14722 		if (wm_phy_is_accessible_pchlan(sc))
   14723 			break;
   14724 
   14725 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14726 		 * forcing MAC to SMBus mode first.
   14727 		 */
   14728 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14729 		reg |= CTRL_EXT_FORCE_SMBUS;
   14730 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14731 #if 0
   14732 		/* XXX Isn't this required??? */
   14733 		CSR_WRITE_FLUSH(sc);
   14734 #endif
   14735 		/* Wait 50 milliseconds for MAC to finish any retries
   14736 		 * that it might be trying to perform from previous
   14737 		 * attempts to acknowledge any phy read requests.
   14738 		 */
   14739 		delay(50 * 1000);
   14740 		/* FALLTHROUGH */
   14741 	case WM_T_PCH2:
   14742 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14743 			break;
   14744 		/* FALLTHROUGH */
   14745 	case WM_T_PCH:
   14746 		if (sc->sc_type == WM_T_PCH)
   14747 			if ((fwsm & FWSM_FW_VALID) != 0)
   14748 				break;
   14749 
   14750 		if (wm_phy_resetisblocked(sc) == true) {
   14751 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14752 			break;
   14753 		}
   14754 
   14755 		/* Toggle LANPHYPC Value bit */
   14756 		wm_toggle_lanphypc_pch_lpt(sc);
   14757 
   14758 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14759 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14760 				break;
   14761 
   14762 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14763 			 * so ensure that the MAC is also out of SMBus mode
   14764 			 */
   14765 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14766 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14767 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14768 
   14769 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14770 				break;
   14771 			rv = -1;
   14772 		}
   14773 		break;
   14774 	default:
   14775 		break;
   14776 	}
   14777 
   14778 	/* Release semaphore */
   14779 	sc->phy.release(sc);
   14780 
   14781 	if (rv == 0) {
   14782 		/* Check to see if able to reset PHY.  Print error if not */
   14783 		if (wm_phy_resetisblocked(sc)) {
   14784 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14785 			goto out;
   14786 		}
   14787 
   14788 		/* Reset the PHY before any access to it.  Doing so, ensures
   14789 		 * that the PHY is in a known good state before we read/write
   14790 		 * PHY registers.  The generic reset is sufficient here,
   14791 		 * because we haven't determined the PHY type yet.
   14792 		 */
   14793 		if (wm_reset_phy(sc) != 0)
   14794 			goto out;
   14795 
   14796 		/* On a successful reset, possibly need to wait for the PHY
   14797 		 * to quiesce to an accessible state before returning control
   14798 		 * to the calling function.  If the PHY does not quiesce, then
   14799 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   14800 		 *  the PHY is in.
   14801 		 */
   14802 		if (wm_phy_resetisblocked(sc))
   14803 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14804 	}
   14805 
   14806 out:
   14807 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14808 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14809 		delay(10*1000);
   14810 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14811 	}
   14812 
   14813 	return 0;
   14814 }
   14815 
   14816 static void
   14817 wm_init_manageability(struct wm_softc *sc)
   14818 {
   14819 
   14820 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14821 		device_xname(sc->sc_dev), __func__));
   14822 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14823 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14824 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14825 
   14826 		/* Disable hardware interception of ARP */
   14827 		manc &= ~MANC_ARP_EN;
   14828 
   14829 		/* Enable receiving management packets to the host */
   14830 		if (sc->sc_type >= WM_T_82571) {
   14831 			manc |= MANC_EN_MNG2HOST;
   14832 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14833 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14834 		}
   14835 
   14836 		CSR_WRITE(sc, WMREG_MANC, manc);
   14837 	}
   14838 }
   14839 
   14840 static void
   14841 wm_release_manageability(struct wm_softc *sc)
   14842 {
   14843 
   14844 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14845 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14846 
   14847 		manc |= MANC_ARP_EN;
   14848 		if (sc->sc_type >= WM_T_82571)
   14849 			manc &= ~MANC_EN_MNG2HOST;
   14850 
   14851 		CSR_WRITE(sc, WMREG_MANC, manc);
   14852 	}
   14853 }
   14854 
   14855 static void
   14856 wm_get_wakeup(struct wm_softc *sc)
   14857 {
   14858 
   14859 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14860 	switch (sc->sc_type) {
   14861 	case WM_T_82573:
   14862 	case WM_T_82583:
   14863 		sc->sc_flags |= WM_F_HAS_AMT;
   14864 		/* FALLTHROUGH */
   14865 	case WM_T_80003:
   14866 	case WM_T_82575:
   14867 	case WM_T_82576:
   14868 	case WM_T_82580:
   14869 	case WM_T_I350:
   14870 	case WM_T_I354:
   14871 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14872 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14873 		/* FALLTHROUGH */
   14874 	case WM_T_82541:
   14875 	case WM_T_82541_2:
   14876 	case WM_T_82547:
   14877 	case WM_T_82547_2:
   14878 	case WM_T_82571:
   14879 	case WM_T_82572:
   14880 	case WM_T_82574:
   14881 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14882 		break;
   14883 	case WM_T_ICH8:
   14884 	case WM_T_ICH9:
   14885 	case WM_T_ICH10:
   14886 	case WM_T_PCH:
   14887 	case WM_T_PCH2:
   14888 	case WM_T_PCH_LPT:
   14889 	case WM_T_PCH_SPT:
   14890 	case WM_T_PCH_CNP:
   14891 		sc->sc_flags |= WM_F_HAS_AMT;
   14892 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14893 		break;
   14894 	default:
   14895 		break;
   14896 	}
   14897 
   14898 	/* 1: HAS_MANAGE */
   14899 	if (wm_enable_mng_pass_thru(sc) != 0)
   14900 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14901 
   14902 	/*
   14903 	 * Note that the WOL flags is set after the resetting of the eeprom
   14904 	 * stuff
   14905 	 */
   14906 }
   14907 
   14908 /*
   14909  * Unconfigure Ultra Low Power mode.
   14910  * Only for I217 and newer (see below).
   14911  */
   14912 static int
   14913 wm_ulp_disable(struct wm_softc *sc)
   14914 {
   14915 	uint32_t reg;
   14916 	uint16_t phyreg;
   14917 	int i = 0, rv = 0;
   14918 
   14919 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14920 		device_xname(sc->sc_dev), __func__));
   14921 	/* Exclude old devices */
   14922 	if ((sc->sc_type < WM_T_PCH_LPT)
   14923 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14924 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14925 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14926 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14927 		return 0;
   14928 
   14929 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14930 		/* Request ME un-configure ULP mode in the PHY */
   14931 		reg = CSR_READ(sc, WMREG_H2ME);
   14932 		reg &= ~H2ME_ULP;
   14933 		reg |= H2ME_ENFORCE_SETTINGS;
   14934 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14935 
   14936 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14937 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14938 			if (i++ == 30) {
   14939 				device_printf(sc->sc_dev, "%s timed out\n",
   14940 				    __func__);
   14941 				return -1;
   14942 			}
   14943 			delay(10 * 1000);
   14944 		}
   14945 		reg = CSR_READ(sc, WMREG_H2ME);
   14946 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14947 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14948 
   14949 		return 0;
   14950 	}
   14951 
   14952 	/* Acquire semaphore */
   14953 	rv = sc->phy.acquire(sc);
   14954 	if (rv != 0) {
   14955 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14956 		device_xname(sc->sc_dev), __func__));
   14957 		return -1;
   14958 	}
   14959 
   14960 	/* Toggle LANPHYPC */
   14961 	wm_toggle_lanphypc_pch_lpt(sc);
   14962 
   14963 	/* Unforce SMBus mode in PHY */
   14964 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14965 	if (rv != 0) {
   14966 		uint32_t reg2;
   14967 
   14968 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14969 			__func__);
   14970 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14971 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14972 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14973 		delay(50 * 1000);
   14974 
   14975 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14976 		    &phyreg);
   14977 		if (rv != 0)
   14978 			goto release;
   14979 	}
   14980 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14981 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14982 
   14983 	/* Unforce SMBus mode in MAC */
   14984 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14985 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14986 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14987 
   14988 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14989 	if (rv != 0)
   14990 		goto release;
   14991 	phyreg |= HV_PM_CTRL_K1_ENA;
   14992 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14993 
   14994 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14995 		&phyreg);
   14996 	if (rv != 0)
   14997 		goto release;
   14998 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14999 	    | I218_ULP_CONFIG1_STICKY_ULP
   15000 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15001 	    | I218_ULP_CONFIG1_WOL_HOST
   15002 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15003 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15004 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15005 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15006 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15007 	phyreg |= I218_ULP_CONFIG1_START;
   15008 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15009 
   15010 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15011 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15012 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15013 
   15014 release:
   15015 	/* Release semaphore */
   15016 	sc->phy.release(sc);
   15017 	wm_gmii_reset(sc);
   15018 	delay(50 * 1000);
   15019 
   15020 	return rv;
   15021 }
   15022 
   15023 /* WOL in the newer chipset interfaces (pchlan) */
   15024 static int
   15025 wm_enable_phy_wakeup(struct wm_softc *sc)
   15026 {
   15027 	device_t dev = sc->sc_dev;
   15028 	uint32_t mreg, moff;
   15029 	uint16_t wuce, wuc, wufc, preg;
   15030 	int i, rv;
   15031 
   15032 	KASSERT(sc->sc_type >= WM_T_PCH);
   15033 
   15034 	/* Copy MAC RARs to PHY RARs */
   15035 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15036 
   15037 	/* Activate PHY wakeup */
   15038 	rv = sc->phy.acquire(sc);
   15039 	if (rv != 0) {
   15040 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15041 		    __func__);
   15042 		return rv;
   15043 	}
   15044 
   15045 	/*
   15046 	 * Enable access to PHY wakeup registers.
   15047 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15048 	 */
   15049 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15050 	if (rv != 0) {
   15051 		device_printf(dev,
   15052 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15053 		goto release;
   15054 	}
   15055 
   15056 	/* Copy MAC MTA to PHY MTA */
   15057 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15058 		uint16_t lo, hi;
   15059 
   15060 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15061 		lo = (uint16_t)(mreg & 0xffff);
   15062 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15063 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15064 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15065 	}
   15066 
   15067 	/* Configure PHY Rx Control register */
   15068 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15069 	mreg = CSR_READ(sc, WMREG_RCTL);
   15070 	if (mreg & RCTL_UPE)
   15071 		preg |= BM_RCTL_UPE;
   15072 	if (mreg & RCTL_MPE)
   15073 		preg |= BM_RCTL_MPE;
   15074 	preg &= ~(BM_RCTL_MO_MASK);
   15075 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15076 	if (moff != 0)
   15077 		preg |= moff << BM_RCTL_MO_SHIFT;
   15078 	if (mreg & RCTL_BAM)
   15079 		preg |= BM_RCTL_BAM;
   15080 	if (mreg & RCTL_PMCF)
   15081 		preg |= BM_RCTL_PMCF;
   15082 	mreg = CSR_READ(sc, WMREG_CTRL);
   15083 	if (mreg & CTRL_RFCE)
   15084 		preg |= BM_RCTL_RFCE;
   15085 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15086 
   15087 	wuc = WUC_APME | WUC_PME_EN;
   15088 	wufc = WUFC_MAG;
   15089 	/* Enable PHY wakeup in MAC register */
   15090 	CSR_WRITE(sc, WMREG_WUC,
   15091 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15092 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15093 
   15094 	/* Configure and enable PHY wakeup in PHY registers */
   15095 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15096 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15097 
   15098 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15099 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15100 
   15101 release:
   15102 	sc->phy.release(sc);
   15103 
   15104 	return 0;
   15105 }
   15106 
   15107 /* Power down workaround on D3 */
   15108 static void
   15109 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15110 {
   15111 	uint32_t reg;
   15112 	uint16_t phyreg;
   15113 	int i;
   15114 
   15115 	for (i = 0; i < 2; i++) {
   15116 		/* Disable link */
   15117 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15118 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15119 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15120 
   15121 		/*
   15122 		 * Call gig speed drop workaround on Gig disable before
   15123 		 * accessing any PHY registers
   15124 		 */
   15125 		if (sc->sc_type == WM_T_ICH8)
   15126 			wm_gig_downshift_workaround_ich8lan(sc);
   15127 
   15128 		/* Write VR power-down enable */
   15129 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15130 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15131 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15132 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15133 
   15134 		/* Read it back and test */
   15135 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15136 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15137 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15138 			break;
   15139 
   15140 		/* Issue PHY reset and repeat at most one more time */
   15141 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15142 	}
   15143 }
   15144 
   15145 /*
   15146  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15147  *  @sc: pointer to the HW structure
   15148  *
   15149  *  During S0 to Sx transition, it is possible the link remains at gig
   15150  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15151  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15152  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15153  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15154  *  needs to be written.
   15155  *  Parts that support (and are linked to a partner which support) EEE in
   15156  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15157  *  than 10Mbps w/o EEE.
   15158  */
   15159 static void
   15160 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15161 {
   15162 	device_t dev = sc->sc_dev;
   15163 	struct ethercom *ec = &sc->sc_ethercom;
   15164 	uint32_t phy_ctrl;
   15165 	int rv;
   15166 
   15167 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15168 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15169 
   15170 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15171 
   15172 	if (sc->sc_phytype == WMPHY_I217) {
   15173 		uint16_t devid = sc->sc_pcidevid;
   15174 
   15175 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15176 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15177 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15178 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15179 		    (sc->sc_type >= WM_T_PCH_SPT))
   15180 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15181 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15182 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15183 
   15184 		if (sc->phy.acquire(sc) != 0)
   15185 			goto out;
   15186 
   15187 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15188 			uint16_t eee_advert;
   15189 
   15190 			rv = wm_read_emi_reg_locked(dev,
   15191 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15192 			if (rv)
   15193 				goto release;
   15194 
   15195 			/*
   15196 			 * Disable LPLU if both link partners support 100BaseT
   15197 			 * EEE and 100Full is advertised on both ends of the
   15198 			 * link, and enable Auto Enable LPI since there will
   15199 			 * be no driver to enable LPI while in Sx.
   15200 			 */
   15201 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15202 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15203 				uint16_t anar, phy_reg;
   15204 
   15205 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15206 				    &anar);
   15207 				if (anar & ANAR_TX_FD) {
   15208 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15209 					    PHY_CTRL_NOND0A_LPLU);
   15210 
   15211 					/* Set Auto Enable LPI after link up */
   15212 					sc->phy.readreg_locked(dev, 2,
   15213 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15214 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15215 					sc->phy.writereg_locked(dev, 2,
   15216 					    I217_LPI_GPIO_CTRL, phy_reg);
   15217 				}
   15218 			}
   15219 		}
   15220 
   15221 		/*
   15222 		 * For i217 Intel Rapid Start Technology support,
   15223 		 * when the system is going into Sx and no manageability engine
   15224 		 * is present, the driver must configure proxy to reset only on
   15225 		 * power good.	LPI (Low Power Idle) state must also reset only
   15226 		 * on power good, as well as the MTA (Multicast table array).
   15227 		 * The SMBus release must also be disabled on LCD reset.
   15228 		 */
   15229 
   15230 		/*
   15231 		 * Enable MTA to reset for Intel Rapid Start Technology
   15232 		 * Support
   15233 		 */
   15234 
   15235 release:
   15236 		sc->phy.release(sc);
   15237 	}
   15238 out:
   15239 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15240 
   15241 	if (sc->sc_type == WM_T_ICH8)
   15242 		wm_gig_downshift_workaround_ich8lan(sc);
   15243 
   15244 	if (sc->sc_type >= WM_T_PCH) {
   15245 		wm_oem_bits_config_ich8lan(sc, false);
   15246 
   15247 		/* Reset PHY to activate OEM bits on 82577/8 */
   15248 		if (sc->sc_type == WM_T_PCH)
   15249 			wm_reset_phy(sc);
   15250 
   15251 		if (sc->phy.acquire(sc) != 0)
   15252 			return;
   15253 		wm_write_smbus_addr(sc);
   15254 		sc->phy.release(sc);
   15255 	}
   15256 }
   15257 
   15258 /*
   15259  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15260  *  @sc: pointer to the HW structure
   15261  *
   15262  *  During Sx to S0 transitions on non-managed devices or managed devices
   15263  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15264  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15265  *  the PHY.
   15266  *  On i217, setup Intel Rapid Start Technology.
   15267  */
   15268 static int
   15269 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15270 {
   15271 	device_t dev = sc->sc_dev;
   15272 	int rv;
   15273 
   15274 	if (sc->sc_type < WM_T_PCH2)
   15275 		return 0;
   15276 
   15277 	rv = wm_init_phy_workarounds_pchlan(sc);
   15278 	if (rv != 0)
   15279 		return -1;
   15280 
   15281 	/* For i217 Intel Rapid Start Technology support when the system
   15282 	 * is transitioning from Sx and no manageability engine is present
   15283 	 * configure SMBus to restore on reset, disable proxy, and enable
   15284 	 * the reset on MTA (Multicast table array).
   15285 	 */
   15286 	if (sc->sc_phytype == WMPHY_I217) {
   15287 		uint16_t phy_reg;
   15288 
   15289 		if (sc->phy.acquire(sc) != 0)
   15290 			return -1;
   15291 
   15292 		/* Clear Auto Enable LPI after link up */
   15293 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15294 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15295 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15296 
   15297 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15298 			/* Restore clear on SMB if no manageability engine
   15299 			 * is present
   15300 			 */
   15301 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15302 			    &phy_reg);
   15303 			if (rv != 0)
   15304 				goto release;
   15305 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15306 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15307 
   15308 			/* Disable Proxy */
   15309 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15310 		}
   15311 		/* Enable reset on MTA */
   15312 		sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15313 		if (rv != 0)
   15314 			goto release;
   15315 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15316 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15317 
   15318 release:
   15319 		sc->phy.release(sc);
   15320 		return rv;
   15321 	}
   15322 
   15323 	return 0;
   15324 }
   15325 
   15326 static void
   15327 wm_enable_wakeup(struct wm_softc *sc)
   15328 {
   15329 	uint32_t reg, pmreg;
   15330 	pcireg_t pmode;
   15331 	int rv = 0;
   15332 
   15333 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15334 		device_xname(sc->sc_dev), __func__));
   15335 
   15336 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15337 	    &pmreg, NULL) == 0)
   15338 		return;
   15339 
   15340 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15341 		goto pme;
   15342 
   15343 	/* Advertise the wakeup capability */
   15344 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15345 	    | CTRL_SWDPIN(3));
   15346 
   15347 	/* Keep the laser running on fiber adapters */
   15348 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15349 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15350 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15351 		reg |= CTRL_EXT_SWDPIN(3);
   15352 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15353 	}
   15354 
   15355 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15356 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15357 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15358 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15359 		wm_suspend_workarounds_ich8lan(sc);
   15360 
   15361 #if 0	/* For the multicast packet */
   15362 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15363 	reg |= WUFC_MC;
   15364 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15365 #endif
   15366 
   15367 	if (sc->sc_type >= WM_T_PCH) {
   15368 		rv = wm_enable_phy_wakeup(sc);
   15369 		if (rv != 0)
   15370 			goto pme;
   15371 	} else {
   15372 		/* Enable wakeup by the MAC */
   15373 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15374 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15375 	}
   15376 
   15377 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15378 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15379 		|| (sc->sc_type == WM_T_PCH2))
   15380 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15381 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15382 
   15383 pme:
   15384 	/* Request PME */
   15385 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15386 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15387 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15388 		/* For WOL */
   15389 		pmode |= PCI_PMCSR_PME_EN;
   15390 	} else {
   15391 		/* Disable WOL */
   15392 		pmode &= ~PCI_PMCSR_PME_EN;
   15393 	}
   15394 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15395 }
   15396 
   15397 /* Disable ASPM L0s and/or L1 for workaround */
   15398 static void
   15399 wm_disable_aspm(struct wm_softc *sc)
   15400 {
   15401 	pcireg_t reg, mask = 0;
   15402 	unsigned const char *str = "";
   15403 
   15404 	/*
   15405 	 *  Only for PCIe device which has PCIe capability in the PCI config
   15406 	 * space.
   15407 	 */
   15408 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15409 		return;
   15410 
   15411 	switch (sc->sc_type) {
   15412 	case WM_T_82571:
   15413 	case WM_T_82572:
   15414 		/*
   15415 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15416 		 * State Power management L1 State (ASPM L1).
   15417 		 */
   15418 		mask = PCIE_LCSR_ASPM_L1;
   15419 		str = "L1 is";
   15420 		break;
   15421 	case WM_T_82573:
   15422 	case WM_T_82574:
   15423 	case WM_T_82583:
   15424 		/*
   15425 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15426 		 *
   15427 		 * The 82574 and 82583 does not support PCIe ASPM L0s with
   15428 		 * some chipset.  The document of 82574 and 82583 says that
   15429 		 * disabling L0s with some specific chipset is sufficient,
   15430 		 * but we follow as of the Intel em driver does.
   15431 		 *
   15432 		 * References:
   15433 		 * Errata 8 of the Specification Update of i82573.
   15434 		 * Errata 20 of the Specification Update of i82574.
   15435 		 * Errata 9 of the Specification Update of i82583.
   15436 		 */
   15437 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15438 		str = "L0s and L1 are";
   15439 		break;
   15440 	default:
   15441 		return;
   15442 	}
   15443 
   15444 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15445 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15446 	reg &= ~mask;
   15447 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15448 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15449 
   15450 	/* Print only in wm_attach() */
   15451 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15452 		aprint_verbose_dev(sc->sc_dev,
   15453 		    "ASPM %s disabled to workaround the errata.\n", str);
   15454 }
   15455 
   15456 /* LPLU */
   15457 
   15458 static void
   15459 wm_lplu_d0_disable(struct wm_softc *sc)
   15460 {
   15461 	struct mii_data *mii = &sc->sc_mii;
   15462 	uint32_t reg;
   15463 	uint16_t phyval;
   15464 
   15465 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15466 		device_xname(sc->sc_dev), __func__));
   15467 
   15468 	if (sc->sc_phytype == WMPHY_IFE)
   15469 		return;
   15470 
   15471 	switch (sc->sc_type) {
   15472 	case WM_T_82571:
   15473 	case WM_T_82572:
   15474 	case WM_T_82573:
   15475 	case WM_T_82575:
   15476 	case WM_T_82576:
   15477 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15478 		phyval &= ~PMR_D0_LPLU;
   15479 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15480 		break;
   15481 	case WM_T_82580:
   15482 	case WM_T_I350:
   15483 	case WM_T_I210:
   15484 	case WM_T_I211:
   15485 		reg = CSR_READ(sc, WMREG_PHPM);
   15486 		reg &= ~PHPM_D0A_LPLU;
   15487 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15488 		break;
   15489 	case WM_T_82574:
   15490 	case WM_T_82583:
   15491 	case WM_T_ICH8:
   15492 	case WM_T_ICH9:
   15493 	case WM_T_ICH10:
   15494 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15495 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15496 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15497 		CSR_WRITE_FLUSH(sc);
   15498 		break;
   15499 	case WM_T_PCH:
   15500 	case WM_T_PCH2:
   15501 	case WM_T_PCH_LPT:
   15502 	case WM_T_PCH_SPT:
   15503 	case WM_T_PCH_CNP:
   15504 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15505 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15506 		if (wm_phy_resetisblocked(sc) == false)
   15507 			phyval |= HV_OEM_BITS_ANEGNOW;
   15508 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15509 		break;
   15510 	default:
   15511 		break;
   15512 	}
   15513 }
   15514 
   15515 /* EEE */
   15516 
   15517 static int
   15518 wm_set_eee_i350(struct wm_softc *sc)
   15519 {
   15520 	struct ethercom *ec = &sc->sc_ethercom;
   15521 	uint32_t ipcnfg, eeer;
   15522 	uint32_t ipcnfg_mask
   15523 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15524 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15525 
   15526 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15527 
   15528 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15529 	eeer = CSR_READ(sc, WMREG_EEER);
   15530 
   15531 	/* Enable or disable per user setting */
   15532 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15533 		ipcnfg |= ipcnfg_mask;
   15534 		eeer |= eeer_mask;
   15535 	} else {
   15536 		ipcnfg &= ~ipcnfg_mask;
   15537 		eeer &= ~eeer_mask;
   15538 	}
   15539 
   15540 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15541 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15542 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15543 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15544 
   15545 	return 0;
   15546 }
   15547 
   15548 static int
   15549 wm_set_eee_pchlan(struct wm_softc *sc)
   15550 {
   15551 	device_t dev = sc->sc_dev;
   15552 	struct ethercom *ec = &sc->sc_ethercom;
   15553 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15554 	int rv = 0;
   15555 
   15556 	switch (sc->sc_phytype) {
   15557 	case WMPHY_82579:
   15558 		lpa = I82579_EEE_LP_ABILITY;
   15559 		pcs_status = I82579_EEE_PCS_STATUS;
   15560 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15561 		break;
   15562 	case WMPHY_I217:
   15563 		lpa = I217_EEE_LP_ABILITY;
   15564 		pcs_status = I217_EEE_PCS_STATUS;
   15565 		adv_addr = I217_EEE_ADVERTISEMENT;
   15566 		break;
   15567 	default:
   15568 		return 0;
   15569 	}
   15570 
   15571 	if (sc->phy.acquire(sc)) {
   15572 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15573 		return 0;
   15574 	}
   15575 
   15576 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15577 	if (rv != 0)
   15578 		goto release;
   15579 
   15580 	/* Clear bits that enable EEE in various speeds */
   15581 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15582 
   15583 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15584 		/* Save off link partner's EEE ability */
   15585 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15586 		if (rv != 0)
   15587 			goto release;
   15588 
   15589 		/* Read EEE advertisement */
   15590 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15591 			goto release;
   15592 
   15593 		/*
   15594 		 * Enable EEE only for speeds in which the link partner is
   15595 		 * EEE capable and for which we advertise EEE.
   15596 		 */
   15597 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15598 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15599 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15600 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15601 			if ((data & ANLPAR_TX_FD) != 0)
   15602 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15603 			else {
   15604 				/*
   15605 				 * EEE is not supported in 100Half, so ignore
   15606 				 * partner's EEE in 100 ability if full-duplex
   15607 				 * is not advertised.
   15608 				 */
   15609 				sc->eee_lp_ability
   15610 				    &= ~AN_EEEADVERT_100_TX;
   15611 			}
   15612 		}
   15613 	}
   15614 
   15615 	if (sc->sc_phytype == WMPHY_82579) {
   15616 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15617 		if (rv != 0)
   15618 			goto release;
   15619 
   15620 		data &= ~I82579_LPI_PLL_SHUT_100;
   15621 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15622 	}
   15623 
   15624 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15625 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15626 		goto release;
   15627 
   15628 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15629 release:
   15630 	sc->phy.release(sc);
   15631 
   15632 	return rv;
   15633 }
   15634 
   15635 static int
   15636 wm_set_eee(struct wm_softc *sc)
   15637 {
   15638 	struct ethercom *ec = &sc->sc_ethercom;
   15639 
   15640 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15641 		return 0;
   15642 
   15643 	if (sc->sc_type == WM_T_I354) {
   15644 		/* I354 uses an external PHY */
   15645 		return 0; /* not yet */
   15646 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15647 		return wm_set_eee_i350(sc);
   15648 	else if (sc->sc_type >= WM_T_PCH2)
   15649 		return wm_set_eee_pchlan(sc);
   15650 
   15651 	return 0;
   15652 }
   15653 
   15654 /*
   15655  * Workarounds (mainly PHY related).
   15656  * Basically, PHY's workarounds are in the PHY drivers.
   15657  */
   15658 
   15659 /* Work-around for 82566 Kumeran PCS lock loss */
   15660 static int
   15661 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15662 {
   15663 	struct mii_data *mii = &sc->sc_mii;
   15664 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15665 	int i, reg, rv;
   15666 	uint16_t phyreg;
   15667 
   15668 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15669 		device_xname(sc->sc_dev), __func__));
   15670 
   15671 	/* If the link is not up, do nothing */
   15672 	if ((status & STATUS_LU) == 0)
   15673 		return 0;
   15674 
   15675 	/* Nothing to do if the link is other than 1Gbps */
   15676 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15677 		return 0;
   15678 
   15679 	for (i = 0; i < 10; i++) {
   15680 		/* read twice */
   15681 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15682 		if (rv != 0)
   15683 			return rv;
   15684 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15685 		if (rv != 0)
   15686 			return rv;
   15687 
   15688 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15689 			goto out;	/* GOOD! */
   15690 
   15691 		/* Reset the PHY */
   15692 		wm_reset_phy(sc);
   15693 		delay(5*1000);
   15694 	}
   15695 
   15696 	/* Disable GigE link negotiation */
   15697 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15698 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15699 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15700 
   15701 	/*
   15702 	 * Call gig speed drop workaround on Gig disable before accessing
   15703 	 * any PHY registers.
   15704 	 */
   15705 	wm_gig_downshift_workaround_ich8lan(sc);
   15706 
   15707 out:
   15708 	return 0;
   15709 }
   15710 
   15711 /*
   15712  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15713  *  @sc: pointer to the HW structure
   15714  *
   15715  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15716  *  LPLU, Gig disable, MDIC PHY reset):
   15717  *    1) Set Kumeran Near-end loopback
   15718  *    2) Clear Kumeran Near-end loopback
   15719  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15720  */
   15721 static void
   15722 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15723 {
   15724 	uint16_t kmreg;
   15725 
   15726 	/* Only for igp3 */
   15727 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15728 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15729 			return;
   15730 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15731 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15732 			return;
   15733 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15734 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15735 	}
   15736 }
   15737 
   15738 /*
   15739  * Workaround for pch's PHYs
   15740  * XXX should be moved to new PHY driver?
   15741  */
   15742 static int
   15743 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15744 {
   15745 	device_t dev = sc->sc_dev;
   15746 	struct mii_data *mii = &sc->sc_mii;
   15747 	struct mii_softc *child;
   15748 	uint16_t phy_data, phyrev = 0;
   15749 	int phytype = sc->sc_phytype;
   15750 	int rv;
   15751 
   15752 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15753 		device_xname(dev), __func__));
   15754 	KASSERT(sc->sc_type == WM_T_PCH);
   15755 
   15756 	/* Set MDIO slow mode before any other MDIO access */
   15757 	if (phytype == WMPHY_82577)
   15758 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15759 			return rv;
   15760 
   15761 	child = LIST_FIRST(&mii->mii_phys);
   15762 	if (child != NULL)
   15763 		phyrev = child->mii_mpd_rev;
   15764 
   15765 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   15766 	if ((child != NULL) &&
   15767 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15768 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15769 		/* Disable generation of early preamble (0x4431) */
   15770 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15771 		    &phy_data);
   15772 		if (rv != 0)
   15773 			return rv;
   15774 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15775 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15776 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15777 		    phy_data);
   15778 		if (rv != 0)
   15779 			return rv;
   15780 
   15781 		/* Preamble tuning for SSC */
   15782 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15783 		if (rv != 0)
   15784 			return rv;
   15785 	}
   15786 
   15787 	/* 82578 */
   15788 	if (phytype == WMPHY_82578) {
   15789 		/*
   15790 		 * Return registers to default by doing a soft reset then
   15791 		 * writing 0x3140 to the control register
   15792 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15793 		 */
   15794 		if ((child != NULL) && (phyrev < 2)) {
   15795 			PHY_RESET(child);
   15796 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15797 			if (rv != 0)
   15798 				return rv;
   15799 		}
   15800 	}
   15801 
   15802 	/* Select page 0 */
   15803 	if ((rv = sc->phy.acquire(sc)) != 0)
   15804 		return rv;
   15805 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15806 	sc->phy.release(sc);
   15807 	if (rv != 0)
   15808 		return rv;
   15809 
   15810 	/*
   15811 	 * Configure the K1 Si workaround during phy reset assuming there is
   15812 	 * link so that it disables K1 if link is in 1Gbps.
   15813 	 */
   15814 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15815 		return rv;
   15816 
   15817 	/* Workaround for link disconnects on a busy hub in half duplex */
   15818 	rv = sc->phy.acquire(sc);
   15819 	if (rv)
   15820 		return rv;
   15821 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15822 	if (rv)
   15823 		goto release;
   15824 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15825 	    phy_data & 0x00ff);
   15826 	if (rv)
   15827 		goto release;
   15828 
   15829 	/* Set MSE higher to enable link to stay up when noise is high */
   15830 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15831 release:
   15832 	sc->phy.release(sc);
   15833 
   15834 	return rv;
   15835 }
   15836 
   15837 /*
   15838  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15839  *  @sc:   pointer to the HW structure
   15840  */
   15841 static void
   15842 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15843 {
   15844 	device_t dev = sc->sc_dev;
   15845 	uint32_t mac_reg;
   15846 	uint16_t i, wuce;
   15847 	int count;
   15848 
   15849 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15850 		device_xname(sc->sc_dev), __func__));
   15851 
   15852 	if (sc->phy.acquire(sc) != 0)
   15853 		return;
   15854 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15855 		goto release;
   15856 
   15857 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15858 	count = wm_rar_count(sc);
   15859 	for (i = 0; i < count; i++) {
   15860 		uint16_t lo, hi;
   15861 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15862 		lo = (uint16_t)(mac_reg & 0xffff);
   15863 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15864 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15865 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15866 
   15867 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15868 		lo = (uint16_t)(mac_reg & 0xffff);
   15869 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15870 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15871 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15872 	}
   15873 
   15874 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15875 
   15876 release:
   15877 	sc->phy.release(sc);
   15878 }
   15879 
   15880 /*
   15881  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15882  *  done after every PHY reset.
   15883  */
   15884 static int
   15885 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15886 {
   15887 	device_t dev = sc->sc_dev;
   15888 	int rv;
   15889 
   15890 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15891 		device_xname(dev), __func__));
   15892 	KASSERT(sc->sc_type == WM_T_PCH2);
   15893 
   15894 	/* Set MDIO slow mode before any other MDIO access */
   15895 	rv = wm_set_mdio_slow_mode_hv(sc);
   15896 	if (rv != 0)
   15897 		return rv;
   15898 
   15899 	rv = sc->phy.acquire(sc);
   15900 	if (rv != 0)
   15901 		return rv;
   15902 	/* Set MSE higher to enable link to stay up when noise is high */
   15903 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15904 	if (rv != 0)
   15905 		goto release;
   15906 	/* Drop link after 5 times MSE threshold was reached */
   15907 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15908 release:
   15909 	sc->phy.release(sc);
   15910 
   15911 	return rv;
   15912 }
   15913 
   15914 /**
   15915  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15916  *  @link: link up bool flag
   15917  *
   15918  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15919  *  preventing further DMA write requests.  Workaround the issue by disabling
   15920  *  the de-assertion of the clock request when in 1Gpbs mode.
   15921  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15922  *  speeds in order to avoid Tx hangs.
   15923  **/
   15924 static int
   15925 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15926 {
   15927 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15928 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15929 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15930 	uint16_t phyreg;
   15931 
   15932 	if (link && (speed == STATUS_SPEED_1000)) {
   15933 		sc->phy.acquire(sc);
   15934 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15935 		    &phyreg);
   15936 		if (rv != 0)
   15937 			goto release;
   15938 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15939 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15940 		if (rv != 0)
   15941 			goto release;
   15942 		delay(20);
   15943 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15944 
   15945 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15946 		    &phyreg);
   15947 release:
   15948 		sc->phy.release(sc);
   15949 		return rv;
   15950 	}
   15951 
   15952 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15953 
   15954 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15955 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15956 	    || !link
   15957 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15958 		goto update_fextnvm6;
   15959 
   15960 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15961 
   15962 	/* Clear link status transmit timeout */
   15963 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15964 	if (speed == STATUS_SPEED_100) {
   15965 		/* Set inband Tx timeout to 5x10us for 100Half */
   15966 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15967 
   15968 		/* Do not extend the K1 entry latency for 100Half */
   15969 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15970 	} else {
   15971 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15972 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15973 
   15974 		/* Extend the K1 entry latency for 10 Mbps */
   15975 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15976 	}
   15977 
   15978 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15979 
   15980 update_fextnvm6:
   15981 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15982 	return 0;
   15983 }
   15984 
   15985 /*
   15986  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15987  *  @sc:   pointer to the HW structure
   15988  *  @link: link up bool flag
   15989  *
   15990  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   15991  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   15992  *  If link is down, the function will restore the default K1 setting located
   15993  *  in the NVM.
   15994  */
   15995 static int
   15996 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15997 {
   15998 	int k1_enable = sc->sc_nvm_k1_enabled;
   15999 
   16000 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16001 		device_xname(sc->sc_dev), __func__));
   16002 
   16003 	if (sc->phy.acquire(sc) != 0)
   16004 		return -1;
   16005 
   16006 	if (link) {
   16007 		k1_enable = 0;
   16008 
   16009 		/* Link stall fix for link up */
   16010 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16011 		    0x0100);
   16012 	} else {
   16013 		/* Link stall fix for link down */
   16014 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16015 		    0x4100);
   16016 	}
   16017 
   16018 	wm_configure_k1_ich8lan(sc, k1_enable);
   16019 	sc->phy.release(sc);
   16020 
   16021 	return 0;
   16022 }
   16023 
   16024 /*
   16025  *  wm_k1_workaround_lv - K1 Si workaround
   16026  *  @sc:   pointer to the HW structure
   16027  *
   16028  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   16029  *  Disable K1 for 1000 and 100 speeds
   16030  */
   16031 static int
   16032 wm_k1_workaround_lv(struct wm_softc *sc)
   16033 {
   16034 	uint32_t reg;
   16035 	uint16_t phyreg;
   16036 	int rv;
   16037 
   16038 	if (sc->sc_type != WM_T_PCH2)
   16039 		return 0;
   16040 
   16041 	/* Set K1 beacon duration based on 10Mbps speed */
   16042 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16043 	if (rv != 0)
   16044 		return rv;
   16045 
   16046 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16047 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16048 		if (phyreg &
   16049 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   16050 			/* LV 1G/100 Packet drop issue wa  */
   16051 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16052 			    &phyreg);
   16053 			if (rv != 0)
   16054 				return rv;
   16055 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16056 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16057 			    phyreg);
   16058 			if (rv != 0)
   16059 				return rv;
   16060 		} else {
   16061 			/* For 10Mbps */
   16062 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16063 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16064 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16065 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16066 		}
   16067 	}
   16068 
   16069 	return 0;
   16070 }
   16071 
   16072 /*
   16073  *  wm_link_stall_workaround_hv - Si workaround
   16074  *  @sc: pointer to the HW structure
   16075  *
   16076  *  This function works around a Si bug where the link partner can get
   16077  *  a link up indication before the PHY does. If small packets are sent
   16078  *  by the link partner they can be placed in the packet buffer without
   16079  *  being properly accounted for by the PHY and will stall preventing
   16080  *  further packets from being received.  The workaround is to clear the
   16081  *  packet buffer after the PHY detects link up.
   16082  */
   16083 static int
   16084 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16085 {
   16086 	uint16_t phyreg;
   16087 
   16088 	if (sc->sc_phytype != WMPHY_82578)
   16089 		return 0;
   16090 
   16091 	/* Do not apply workaround if in PHY loopback bit 14 set */
   16092 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16093 	if ((phyreg & BMCR_LOOP) != 0)
   16094 		return 0;
   16095 
   16096 	/* Check if link is up and at 1Gbps */
   16097 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16098 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16099 	    | BM_CS_STATUS_SPEED_MASK;
   16100 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16101 		| BM_CS_STATUS_SPEED_1000))
   16102 		return 0;
   16103 
   16104 	delay(200 * 1000);	/* XXX too big */
   16105 
   16106 	/* Flush the packets in the fifo buffer */
   16107 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16108 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16109 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16110 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16111 
   16112 	return 0;
   16113 }
   16114 
   16115 static int
   16116 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16117 {
   16118 	int rv;
   16119 	uint16_t reg;
   16120 
   16121 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16122 	if (rv != 0)
   16123 		return rv;
   16124 
   16125 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16126 	    reg | HV_KMRN_MDIO_SLOW);
   16127 }
   16128 
   16129 /*
   16130  *  wm_configure_k1_ich8lan - Configure K1 power state
   16131  *  @sc: pointer to the HW structure
   16132  *  @enable: K1 state to configure
   16133  *
   16134  *  Configure the K1 power state based on the provided parameter.
   16135  *  Assumes semaphore already acquired.
   16136  */
   16137 static void
   16138 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16139 {
   16140 	uint32_t ctrl, ctrl_ext, tmp;
   16141 	uint16_t kmreg;
   16142 	int rv;
   16143 
   16144 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16145 
   16146 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16147 	if (rv != 0)
   16148 		return;
   16149 
   16150 	if (k1_enable)
   16151 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16152 	else
   16153 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16154 
   16155 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16156 	if (rv != 0)
   16157 		return;
   16158 
   16159 	delay(20);
   16160 
   16161 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16162 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16163 
   16164 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16165 	tmp |= CTRL_FRCSPD;
   16166 
   16167 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16168 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16169 	CSR_WRITE_FLUSH(sc);
   16170 	delay(20);
   16171 
   16172 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16173 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16174 	CSR_WRITE_FLUSH(sc);
   16175 	delay(20);
   16176 
   16177 	return;
   16178 }
   16179 
   16180 /* special case - for 82575 - need to do manual init ... */
   16181 static void
   16182 wm_reset_init_script_82575(struct wm_softc *sc)
   16183 {
   16184 	/*
   16185 	 * Remark: this is untested code - we have no board without EEPROM
   16186 	 *  same setup as mentioned int the FreeBSD driver for the i82575
   16187 	 */
   16188 
   16189 	/* SerDes configuration via SERDESCTRL */
   16190 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16191 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16192 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16193 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16194 
   16195 	/* CCM configuration via CCMCTL register */
   16196 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16197 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16198 
   16199 	/* PCIe lanes configuration */
   16200 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16201 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16202 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16203 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16204 
   16205 	/* PCIe PLL Configuration */
   16206 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16207 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16208 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16209 }
   16210 
   16211 static void
   16212 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16213 {
   16214 	uint32_t reg;
   16215 	uint16_t nvmword;
   16216 	int rv;
   16217 
   16218 	if (sc->sc_type != WM_T_82580)
   16219 		return;
   16220 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16221 		return;
   16222 
   16223 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16224 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16225 	if (rv != 0) {
   16226 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16227 		    __func__);
   16228 		return;
   16229 	}
   16230 
   16231 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16232 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16233 		reg |= MDICNFG_DEST;
   16234 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16235 		reg |= MDICNFG_COM_MDIO;
   16236 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16237 }
   16238 
   16239 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16240 
   16241 static bool
   16242 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16243 {
   16244 	uint32_t reg;
   16245 	uint16_t id1, id2;
   16246 	int i, rv;
   16247 
   16248 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16249 		device_xname(sc->sc_dev), __func__));
   16250 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16251 
   16252 	id1 = id2 = 0xffff;
   16253 	for (i = 0; i < 2; i++) {
   16254 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16255 		    &id1);
   16256 		if ((rv != 0) || MII_INVALIDID(id1))
   16257 			continue;
   16258 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16259 		    &id2);
   16260 		if ((rv != 0) || MII_INVALIDID(id2))
   16261 			continue;
   16262 		break;
   16263 	}
   16264 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16265 		goto out;
   16266 
   16267 	/*
   16268 	 * In case the PHY needs to be in mdio slow mode,
   16269 	 * set slow mode and try to get the PHY id again.
   16270 	 */
   16271 	rv = 0;
   16272 	if (sc->sc_type < WM_T_PCH_LPT) {
   16273 		sc->phy.release(sc);
   16274 		wm_set_mdio_slow_mode_hv(sc);
   16275 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16276 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16277 		sc->phy.acquire(sc);
   16278 	}
   16279 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16280 		device_printf(sc->sc_dev, "XXX return with false\n");
   16281 		return false;
   16282 	}
   16283 out:
   16284 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16285 		/* Only unforce SMBus if ME is not active */
   16286 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16287 			uint16_t phyreg;
   16288 
   16289 			/* Unforce SMBus mode in PHY */
   16290 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16291 			    CV_SMB_CTRL, &phyreg);
   16292 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16293 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16294 			    CV_SMB_CTRL, phyreg);
   16295 
   16296 			/* Unforce SMBus mode in MAC */
   16297 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16298 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16299 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16300 		}
   16301 	}
   16302 	return true;
   16303 }
   16304 
   16305 static void
   16306 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16307 {
   16308 	uint32_t reg;
   16309 	int i;
   16310 
   16311 	/* Set PHY Config Counter to 50msec */
   16312 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16313 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16314 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16315 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16316 
   16317 	/* Toggle LANPHYPC */
   16318 	reg = CSR_READ(sc, WMREG_CTRL);
   16319 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16320 	reg &= ~CTRL_LANPHYPC_VALUE;
   16321 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16322 	CSR_WRITE_FLUSH(sc);
   16323 	delay(1000);
   16324 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16325 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16326 	CSR_WRITE_FLUSH(sc);
   16327 
   16328 	if (sc->sc_type < WM_T_PCH_LPT)
   16329 		delay(50 * 1000);
   16330 	else {
   16331 		i = 20;
   16332 
   16333 		do {
   16334 			delay(5 * 1000);
   16335 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16336 		    && i--);
   16337 
   16338 		delay(30 * 1000);
   16339 	}
   16340 }
   16341 
   16342 static int
   16343 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16344 {
   16345 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16346 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16347 	uint32_t rxa;
   16348 	uint16_t scale = 0, lat_enc = 0;
   16349 	int32_t obff_hwm = 0;
   16350 	int64_t lat_ns, value;
   16351 
   16352 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16353 		device_xname(sc->sc_dev), __func__));
   16354 
   16355 	if (link) {
   16356 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16357 		uint32_t status;
   16358 		uint16_t speed;
   16359 		pcireg_t preg;
   16360 
   16361 		status = CSR_READ(sc, WMREG_STATUS);
   16362 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16363 		case STATUS_SPEED_10:
   16364 			speed = 10;
   16365 			break;
   16366 		case STATUS_SPEED_100:
   16367 			speed = 100;
   16368 			break;
   16369 		case STATUS_SPEED_1000:
   16370 			speed = 1000;
   16371 			break;
   16372 		default:
   16373 			device_printf(sc->sc_dev, "Unknown speed "
   16374 			    "(status = %08x)\n", status);
   16375 			return -1;
   16376 		}
   16377 
   16378 		/* Rx Packet Buffer Allocation size (KB) */
   16379 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16380 
   16381 		/*
   16382 		 * Determine the maximum latency tolerated by the device.
   16383 		 *
   16384 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16385 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16386 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16387 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16388 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16389 		 */
   16390 		lat_ns = ((int64_t)rxa * 1024 -
   16391 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16392 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16393 		if (lat_ns < 0)
   16394 			lat_ns = 0;
   16395 		else
   16396 			lat_ns /= speed;
   16397 		value = lat_ns;
   16398 
   16399 		while (value > LTRV_VALUE) {
   16400 			scale ++;
   16401 			value = howmany(value, __BIT(5));
   16402 		}
   16403 		if (scale > LTRV_SCALE_MAX) {
   16404 			device_printf(sc->sc_dev,
   16405 			    "Invalid LTR latency scale %d\n", scale);
   16406 			return -1;
   16407 		}
   16408 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16409 
   16410 		/* Determine the maximum latency tolerated by the platform */
   16411 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16412 		    WM_PCI_LTR_CAP_LPT);
   16413 		max_snoop = preg & 0xffff;
   16414 		max_nosnoop = preg >> 16;
   16415 
   16416 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16417 
   16418 		if (lat_enc > max_ltr_enc) {
   16419 			lat_enc = max_ltr_enc;
   16420 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16421 			    * PCI_LTR_SCALETONS(
   16422 				    __SHIFTOUT(lat_enc,
   16423 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16424 		}
   16425 
   16426 		if (lat_ns) {
   16427 			lat_ns *= speed * 1000;
   16428 			lat_ns /= 8;
   16429 			lat_ns /= 1000000000;
   16430 			obff_hwm = (int32_t)(rxa - lat_ns);
   16431 		}
   16432 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   16433 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   16434 			    "(rxa = %d, lat_ns = %d)\n",
   16435 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16436 			return -1;
   16437 		}
   16438 	}
   16439 	/* Snoop and No-Snoop latencies the same */
   16440 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16441 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16442 
   16443 	/* Set OBFF high water mark */
   16444 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16445 	reg |= obff_hwm;
   16446 	CSR_WRITE(sc, WMREG_SVT, reg);
   16447 
   16448 	/* Enable OBFF */
   16449 	reg = CSR_READ(sc, WMREG_SVCR);
   16450 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16451 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16452 
   16453 	return 0;
   16454 }
   16455 
   16456 /*
   16457  * I210 Errata 25 and I211 Errata 10
   16458  * Slow System Clock.
   16459  */
   16460 static int
   16461 wm_pll_workaround_i210(struct wm_softc *sc)
   16462 {
   16463 	uint32_t mdicnfg, wuc;
   16464 	uint32_t reg;
   16465 	pcireg_t pcireg;
   16466 	uint32_t pmreg;
   16467 	uint16_t nvmword, tmp_nvmword;
   16468 	uint16_t phyval;
   16469 	bool wa_done = false;
   16470 	int i, rv = 0;
   16471 
   16472 	/* Get Power Management cap offset */
   16473 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16474 	    &pmreg, NULL) == 0)
   16475 		return -1;
   16476 
   16477 	/* Save WUC and MDICNFG registers */
   16478 	wuc = CSR_READ(sc, WMREG_WUC);
   16479 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16480 
   16481 	reg = mdicnfg & ~MDICNFG_DEST;
   16482 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16483 
   16484 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16485 		nvmword = INVM_DEFAULT_AL;
   16486 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16487 
   16488 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16489 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16490 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16491 
   16492 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16493 			rv = 0;
   16494 			break; /* OK */
   16495 		} else
   16496 			rv = -1;
   16497 
   16498 		wa_done = true;
   16499 		/* Directly reset the internal PHY */
   16500 		reg = CSR_READ(sc, WMREG_CTRL);
   16501 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16502 
   16503 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16504 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16505 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16506 
   16507 		CSR_WRITE(sc, WMREG_WUC, 0);
   16508 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16509 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16510 
   16511 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16512 		    pmreg + PCI_PMCSR);
   16513 		pcireg |= PCI_PMCSR_STATE_D3;
   16514 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16515 		    pmreg + PCI_PMCSR, pcireg);
   16516 		delay(1000);
   16517 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16518 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16519 		    pmreg + PCI_PMCSR, pcireg);
   16520 
   16521 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16522 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16523 
   16524 		/* Restore WUC register */
   16525 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16526 	}
   16527 
   16528 	/* Restore MDICNFG setting */
   16529 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16530 	if (wa_done)
   16531 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16532 	return rv;
   16533 }
   16534 
   16535 static void
   16536 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16537 {
   16538 	uint32_t reg;
   16539 
   16540 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16541 		device_xname(sc->sc_dev), __func__));
   16542 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16543 	    || (sc->sc_type == WM_T_PCH_CNP));
   16544 
   16545 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16546 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16547 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16548 
   16549 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16550 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16551 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16552 }
   16553