/*	$NetBSD: if_wm.c,v 1.654 2019/12/11 09:48:16 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.654 2019/12/11 09:48:16 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it (see the illustrative
 * sketch below the macros).
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

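/*
 * Illustrative sketch of the m_defrag() fallback described above.  This
 * is not the driver's code (the real logic, with a retry limit and event
 * counters, lives in the send paths); the helper name is made up.
 */
#if 0	/* example only */
static int
example_load_txmbuf(bus_dma_tag_t dmat, bus_dmamap_t dmamap, struct mbuf **m0)
{
	struct mbuf *m;
	int error;

	/* First attempt: the chain may use up to WM_NTXSEGS segments. */
	error = bus_dmamap_load_mbuf(dmat, dmamap, *m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != EFBIG)
		return error;

	/* EFBIG: too many DMA segments.  Compact the chain and retry. */
	m = m_defrag(*m0, M_NOWAIT);
	if (m == NULL)
		return ENOBUFS;		/* caller drops the packet */
	*m0 = m;
	return bus_dmamap_load_mbuf(dmat, dmamap, *m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
}
#endif
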
#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

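/*
 * SW/FW semaphore bits for each PHY.  Indexed by the device's function
 * id; e.g. (illustratively) swfwphysem[sc->sc_funcid] yields the
 * semaphore bit guarding this port's PHY.
 */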
static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
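
/*
 * Expansion example (illustrative, not compiled): for a Tx queue,
 * WM_Q_EVCNT_DEFINE(txq, txdstall) declares roughly
 *
 *	char txq_txdstall_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdstall;
 *
 * (the name buffer's size comes from the literal string, so it is the
 * same for every counter), and
 * WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, 0, xname) registers the
 * counter under the name "txq00txdstall" via evcnt_attach_dynamic(9).
 */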

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* Tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* Rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
	krndsource_t rnd_source;	/* random source */
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;
	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
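
/*
 * Illustrative walk-through (not compiled): a jumbo packet spanning
 * three 2k clusters m1, m2, m3 is assembled with
 *
 *	WM_RXCHAIN_RESET(rxq);
 *	WM_RXCHAIN_LINK(rxq, m1);
 *	WM_RXCHAIN_LINK(rxq, m2);
 *	WM_RXCHAIN_LINK(rxq, m3);
 *
 * after which rxq_head points at m1 and rxq_tailp at m3->m_next, so
 * each append is O(1) and the chain never needs to be walked.
 */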

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

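/*
 * Usage note (illustrative): PCI writes are posted, so code that needs a
 * register write to reach the chip before a delay or a dependent access
 * follows it with CSR_WRITE_FLUSH(), e.g.
 *
 *	CSR_WRITE(sc, reg, val);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(us);
 *
 * The dummy read of WMREG_STATUS forces the posted write out.
 */
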
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
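
/*
 * Worked example (illustrative): a descriptor ring lives in one DMA
 * allocation, so descriptor x sits at base + x * descsize.  With a
 * 64-bit base of 0x123456000 and 16-byte descriptors, descriptor 2 is
 * at 0x123456020, which the _LO/_HI macros split into 0x23456020 and
 * 0x1 for the two 32-bit register halves the hardware expects.  On
 * platforms with a 32-bit bus_addr_t the high half is always 0.
 */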

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds are implemented in the PHY drivers themselves.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1323 	  "82567LM-3 LAN Controller",
   1324 	  WM_T_ICH10,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1326 	  "82567LF-3 LAN Controller",
   1327 	  WM_T_ICH10,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1329 	  "82567V-2 LAN Controller",
   1330 	  WM_T_ICH10,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1332 	  "82567V-3? LAN Controller",
   1333 	  WM_T_ICH10,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1335 	  "HANKSVILLE LAN Controller",
   1336 	  WM_T_ICH10,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1338 	  "PCH LAN (82577LM) Controller",
   1339 	  WM_T_PCH,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1341 	  "PCH LAN (82577LC) Controller",
   1342 	  WM_T_PCH,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1344 	  "PCH LAN (82578DM) Controller",
   1345 	  WM_T_PCH,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1347 	  "PCH LAN (82578DC) Controller",
   1348 	  WM_T_PCH,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1350 	  "PCH2 LAN (82579LM) Controller",
   1351 	  WM_T_PCH2,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1353 	  "PCH2 LAN (82579V) Controller",
   1354 	  WM_T_PCH2,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1356 	  "82575EB dual-1000baseT Ethernet",
   1357 	  WM_T_82575,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1359 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1360 	  WM_T_82575,		WMP_F_SERDES },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1362 	  "82575GB quad-1000baseT Ethernet",
   1363 	  WM_T_82575,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1365 	  "82575GB quad-1000baseT Ethernet (PM)",
   1366 	  WM_T_82575,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1368 	  "82576 1000BaseT Ethernet",
   1369 	  WM_T_82576,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1371 	  "82576 1000BaseX Ethernet",
   1372 	  WM_T_82576,		WMP_F_FIBER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1375 	  "82576 gigabit Ethernet (SERDES)",
   1376 	  WM_T_82576,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1379 	  "82576 quad-1000BaseT Ethernet",
   1380 	  WM_T_82576,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1383 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1384 	  WM_T_82576,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1387 	  "82576 gigabit Ethernet",
   1388 	  WM_T_82576,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1391 	  "82576 gigabit Ethernet (SERDES)",
   1392 	  WM_T_82576,		WMP_F_SERDES },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1394 	  "82576 quad-gigabit Ethernet (SERDES)",
   1395 	  WM_T_82576,		WMP_F_SERDES },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1398 	  "82580 1000BaseT Ethernet",
   1399 	  WM_T_82580,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1401 	  "82580 1000BaseX Ethernet",
   1402 	  WM_T_82580,		WMP_F_FIBER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1405 	  "82580 1000BaseT Ethernet (SERDES)",
   1406 	  WM_T_82580,		WMP_F_SERDES },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1409 	  "82580 gigabit Ethernet (SGMII)",
   1410 	  WM_T_82580,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1412 	  "82580 dual-1000BaseT Ethernet",
   1413 	  WM_T_82580,		WMP_F_COPPER },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1416 	  "82580 quad-1000BaseX Ethernet",
   1417 	  WM_T_82580,		WMP_F_FIBER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1420 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1421 	  WM_T_82580,		WMP_F_COPPER },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1424 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1425 	  WM_T_82580,		WMP_F_SERDES },
   1426 
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1428 	  "DH89XXCC 1000BASE-KX Ethernet",
   1429 	  WM_T_82580,		WMP_F_SERDES },
   1430 
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1432 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1433 	  WM_T_82580,		WMP_F_SERDES },
   1434 
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1436 	  "I350 Gigabit Network Connection",
   1437 	  WM_T_I350,		WMP_F_COPPER },
   1438 
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1440 	  "I350 Gigabit Fiber Network Connection",
   1441 	  WM_T_I350,		WMP_F_FIBER },
   1442 
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1444 	  "I350 Gigabit Backplane Connection",
   1445 	  WM_T_I350,		WMP_F_SERDES },
   1446 
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1448 	  "I350 Quad Port Gigabit Ethernet",
   1449 	  WM_T_I350,		WMP_F_SERDES },
   1450 
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1452 	  "I350 Gigabit Connection",
   1453 	  WM_T_I350,		WMP_F_COPPER },
   1454 
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1456 	  "I354 Gigabit Ethernet (KX)",
   1457 	  WM_T_I354,		WMP_F_SERDES },
   1458 
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1460 	  "I354 Gigabit Ethernet (SGMII)",
   1461 	  WM_T_I354,		WMP_F_COPPER },
   1462 
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1464 	  "I354 Gigabit Ethernet (2.5G)",
   1465 	  WM_T_I354,		WMP_F_COPPER },
   1466 
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1468 	  "I210-T1 Ethernet Server Adapter",
   1469 	  WM_T_I210,		WMP_F_COPPER },
   1470 
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1472 	  "I210 Ethernet (Copper OEM)",
   1473 	  WM_T_I210,		WMP_F_COPPER },
   1474 
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1476 	  "I210 Ethernet (Copper IT)",
   1477 	  WM_T_I210,		WMP_F_COPPER },
   1478 
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1480 	  "I210 Ethernet (Copper, FLASH less)",
   1481 	  WM_T_I210,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1484 	  "I210 Gigabit Ethernet (Fiber)",
   1485 	  WM_T_I210,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1488 	  "I210 Gigabit Ethernet (SERDES)",
   1489 	  WM_T_I210,		WMP_F_SERDES },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1492 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1493 	  WM_T_I210,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1496 	  "I210 Gigabit Ethernet (SGMII)",
   1497 	  WM_T_I210,		WMP_F_COPPER },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1500 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1501 	  WM_T_I210,		WMP_F_COPPER },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1504 	  "I211 Ethernet (COPPER)",
   1505 	  WM_T_I211,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1507 	  "I217 V Ethernet Connection",
   1508 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1510 	  "I217 LM Ethernet Connection",
   1511 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1513 	  "I218 V Ethernet Connection",
   1514 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1516 	  "I218 V Ethernet Connection",
   1517 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1519 	  "I218 V Ethernet Connection",
   1520 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1522 	  "I218 LM Ethernet Connection",
   1523 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1525 	  "I218 LM Ethernet Connection",
   1526 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1528 	  "I218 LM Ethernet Connection",
   1529 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1531 	  "I219 LM Ethernet Connection",
   1532 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1534 	  "I219 LM Ethernet Connection",
   1535 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1537 	  "I219 LM Ethernet Connection",
   1538 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1540 	  "I219 LM Ethernet Connection",
   1541 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1543 	  "I219 LM Ethernet Connection",
   1544 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1546 	  "I219 LM Ethernet Connection",
   1547 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1549 	  "I219 LM Ethernet Connection",
   1550 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1552 	  "I219 LM Ethernet Connection",
   1553 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1555 	  "I219 LM Ethernet Connection",
   1556 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1558 	  "I219 V Ethernet Connection",
   1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1561 	  "I219 V Ethernet Connection",
   1562 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1564 	  "I219 V Ethernet Connection",
   1565 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1567 	  "I219 V Ethernet Connection",
   1568 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1570 	  "I219 V Ethernet Connection",
   1571 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1573 	  "I219 V Ethernet Connection",
   1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1576 	  "I219 V Ethernet Connection",
   1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1579 	  "I219 V Ethernet Connection",
   1580 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1581 	{ 0,			0,
   1582 	  NULL,
   1583 	  0,			0 },
   1584 };
   1585 
   1586 /*
   1587  * Register read/write functions.
   1588  * Other than CSR_{READ|WRITE}().
   1589  */
   1590 
   1591 #if 0 /* Not currently used */
   1592 static inline uint32_t
   1593 wm_io_read(struct wm_softc *sc, int reg)
   1594 {
   1595 
   1596 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1597 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1598 }
   1599 #endif
   1600 
   1601 static inline void
   1602 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1603 {
   1604 
   1605 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1606 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1607 }
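/*
 * Illustrative sketch, not part of the driver: the I/O window above is
 * an address/data register pair -- a write to offset 0 latches the
 * register offset, and offset 4 then moves the data.  A hypothetical
 * read-modify-write helper built on the same pattern might look like
 * this (wm_io_setbits is not a real function in this driver).
 */
#if 0
static inline void
wm_io_setbits(struct wm_softc *sc, int reg, uint32_t bits)
{
	uint32_t val;

	/* Latch the register address, then read the current value. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4);
	/* Re-latch the address and write back with the bits set. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val | bits);
}
#endif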
   1608 
   1609 static inline void
   1610 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1611     uint32_t data)
   1612 {
   1613 	uint32_t regval;
   1614 	int i;
   1615 
   1616 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1617 
   1618 	CSR_WRITE(sc, reg, regval);
   1619 
   1620 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1621 		delay(5);
   1622 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1623 			break;
   1624 	}
   1625 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1626 		aprint_error("%s: WARNING:"
   1627 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1628 		    device_xname(sc->sc_dev), reg);
   1629 	}
   1630 }
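/*
 * Note on the poll above: the loop waits up to SCTL_CTL_POLL_TIMEOUT
 * iterations of 5 microseconds each for SCTL_CTL_READY to assert
 * before warning; the write itself is not retried.
 */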
   1631 
   1632 static inline void
   1633 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1634 {
   1635 	wa->wa_low = htole32(v & 0xffffffffU);
   1636 	if (sizeof(bus_addr_t) == 8)
   1637 		wa->wa_high = htole32((uint64_t) v >> 32);
   1638 	else
   1639 		wa->wa_high = 0;
   1640 }
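/*
 * Worked example for wm_set_dma_addr(): with a 64-bit bus_addr_t and
 * v == 0x123456789ULL, the descriptor gets wa_low == htole32(0x23456789)
 * and wa_high == htole32(0x1); with a 32-bit bus_addr_t the high word
 * is always 0.
 */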
   1641 
   1642 /*
   1643  * Descriptor sync/init functions.
   1644  */
   1645 static inline void
   1646 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1647 {
   1648 	struct wm_softc *sc = txq->txq_sc;
   1649 
   1650 	/* If it will wrap around, sync to the end of the ring. */
   1651 	if ((start + num) > WM_NTXDESC(txq)) {
   1652 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1653 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1654 		    (WM_NTXDESC(txq) - start), ops);
   1655 		num -= (WM_NTXDESC(txq) - start);
   1656 		start = 0;
   1657 	}
   1658 
   1659 	/* Now sync whatever is left. */
   1660 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1661 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1662 }
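/*
 * Worked example of the wraparound split in wm_cdtxsync(): with
 * WM_NTXDESC(txq) == 256, start == 250 and num == 10, the first
 * bus_dmamap_sync() covers descriptors 250-255 and the second covers
 * descriptors 0-3.
 */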
   1663 
   1664 static inline void
   1665 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1666 {
   1667 	struct wm_softc *sc = rxq->rxq_sc;
   1668 
   1669 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1670 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1671 }
   1672 
   1673 static inline void
   1674 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1675 {
   1676 	struct wm_softc *sc = rxq->rxq_sc;
   1677 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1678 	struct mbuf *m = rxs->rxs_mbuf;
   1679 
   1680 	/*
   1681 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1682 	 * so that the payload after the Ethernet header is aligned
   1683 	 * to a 4-byte boundary.
    1684 	 *
   1685 	 * XXX BRAINDAMAGE ALERT!
   1686 	 * The stupid chip uses the same size for every buffer, which
   1687 	 * is set in the Receive Control register.  We are using the 2K
   1688 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1689 	 * reason, we can't "scoot" packets longer than the standard
   1690 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1691 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1692 	 * the upper layer copy the headers.
   1693 	 */
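	/*
	 * Example of the tweak: with sc_align_tweak == 2, the 14-byte
	 * Ethernet header occupies buffer bytes 2-15, so the IP header
	 * that follows starts at byte 16, a 4-byte boundary.
	 */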
   1694 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1695 
   1696 	if (sc->sc_type == WM_T_82574) {
   1697 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1698 		rxd->erx_data.erxd_addr =
   1699 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1700 		rxd->erx_data.erxd_dd = 0;
   1701 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1702 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1703 
   1704 		rxd->nqrx_data.nrxd_paddr =
   1705 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1706 		/* Currently, split header is not supported. */
   1707 		rxd->nqrx_data.nrxd_haddr = 0;
   1708 	} else {
   1709 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1710 
   1711 		wm_set_dma_addr(&rxd->wrx_addr,
   1712 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1713 		rxd->wrx_len = 0;
   1714 		rxd->wrx_cksum = 0;
   1715 		rxd->wrx_status = 0;
   1716 		rxd->wrx_errors = 0;
   1717 		rxd->wrx_special = 0;
   1718 	}
   1719 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1720 
   1721 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1722 }
   1723 
   1724 /*
   1725  * Device driver interface functions and commonly used functions.
   1726  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1727  */
   1728 
    1729 /* Look up a device in the supported device table */
   1730 static const struct wm_product *
   1731 wm_lookup(const struct pci_attach_args *pa)
   1732 {
   1733 	const struct wm_product *wmp;
   1734 
   1735 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1736 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1737 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1738 			return wmp;
   1739 	}
   1740 	return NULL;
   1741 }
   1742 
   1743 /* The match function (ca_match) */
   1744 static int
   1745 wm_match(device_t parent, cfdata_t cf, void *aux)
   1746 {
   1747 	struct pci_attach_args *pa = aux;
   1748 
   1749 	if (wm_lookup(pa) != NULL)
   1750 		return 1;
   1751 
   1752 	return 0;
   1753 }
   1754 
   1755 /* The attach function (ca_attach) */
   1756 static void
   1757 wm_attach(device_t parent, device_t self, void *aux)
   1758 {
   1759 	struct wm_softc *sc = device_private(self);
   1760 	struct pci_attach_args *pa = aux;
   1761 	prop_dictionary_t dict;
   1762 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1763 	pci_chipset_tag_t pc = pa->pa_pc;
   1764 	int counts[PCI_INTR_TYPE_SIZE];
   1765 	pci_intr_type_t max_type;
   1766 	const char *eetype, *xname;
   1767 	bus_space_tag_t memt;
   1768 	bus_space_handle_t memh;
   1769 	bus_size_t memsize;
   1770 	int memh_valid;
   1771 	int i, error;
   1772 	const struct wm_product *wmp;
   1773 	prop_data_t ea;
   1774 	prop_number_t pn;
   1775 	uint8_t enaddr[ETHER_ADDR_LEN];
   1776 	char buf[256];
   1777 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1778 	pcireg_t preg, memtype;
   1779 	uint16_t eeprom_data, apme_mask;
   1780 	bool force_clear_smbi;
   1781 	uint32_t link_mode;
   1782 	uint32_t reg;
   1783 
   1784 	sc->sc_dev = self;
   1785 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1786 	sc->sc_core_stopping = false;
   1787 
   1788 	wmp = wm_lookup(pa);
   1789 #ifdef DIAGNOSTIC
   1790 	if (wmp == NULL) {
   1791 		printf("\n");
   1792 		panic("wm_attach: impossible");
   1793 	}
   1794 #endif
   1795 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1796 
   1797 	sc->sc_pc = pa->pa_pc;
   1798 	sc->sc_pcitag = pa->pa_tag;
   1799 
   1800 	if (pci_dma64_available(pa))
   1801 		sc->sc_dmat = pa->pa_dmat64;
   1802 	else
   1803 		sc->sc_dmat = pa->pa_dmat;
   1804 
   1805 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
    1806 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1807 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1808 
   1809 	sc->sc_type = wmp->wmp_type;
   1810 
   1811 	/* Set default function pointers */
   1812 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1813 	sc->phy.release = sc->nvm.release = wm_put_null;
   1814 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1815 
   1816 	if (sc->sc_type < WM_T_82543) {
   1817 		if (sc->sc_rev < 2) {
   1818 			aprint_error_dev(sc->sc_dev,
   1819 			    "i82542 must be at least rev. 2\n");
   1820 			return;
   1821 		}
   1822 		if (sc->sc_rev < 3)
   1823 			sc->sc_type = WM_T_82542_2_0;
   1824 	}
   1825 
   1826 	/*
   1827 	 * Disable MSI for Errata:
   1828 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1829 	 *
   1830 	 *  82544: Errata 25
   1831 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1832 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1833 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1834 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1835 	 *
   1836 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1837 	 *
   1838 	 *  82571 & 82572: Errata 63
   1839 	 */
   1840 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1841 	    || (sc->sc_type == WM_T_82572))
   1842 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1843 
   1844 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1845 	    || (sc->sc_type == WM_T_82580)
   1846 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1847 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1848 		sc->sc_flags |= WM_F_NEWQUEUE;
   1849 
   1850 	/* Set device properties (mactype) */
   1851 	dict = device_properties(sc->sc_dev);
   1852 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1853 
   1854 	/*
    1855 	 * Map the device.  All devices support memory-mapped access,
   1856 	 * and it is really required for normal operation.
   1857 	 */
   1858 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1859 	switch (memtype) {
   1860 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1861 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1862 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1863 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1864 		break;
   1865 	default:
   1866 		memh_valid = 0;
   1867 		break;
   1868 	}
   1869 
   1870 	if (memh_valid) {
   1871 		sc->sc_st = memt;
   1872 		sc->sc_sh = memh;
   1873 		sc->sc_ss = memsize;
   1874 	} else {
   1875 		aprint_error_dev(sc->sc_dev,
   1876 		    "unable to map device registers\n");
   1877 		return;
   1878 	}
   1879 
   1880 	/*
   1881 	 * In addition, i82544 and later support I/O mapped indirect
   1882 	 * register access.  It is not desirable (nor supported in
   1883 	 * this driver) to use it for normal operation, though it is
   1884 	 * required to work around bugs in some chip versions.
   1885 	 */
   1886 	if (sc->sc_type >= WM_T_82544) {
   1887 		/* First we have to find the I/O BAR. */
   1888 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1889 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1890 			if (memtype == PCI_MAPREG_TYPE_IO)
   1891 				break;
   1892 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1893 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1894 				i += 4;	/* skip high bits, too */
   1895 		}
   1896 		if (i < PCI_MAPREG_END) {
   1897 			/*
    1898 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1899 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1900 			 * That's no problem, because the newer chips don't
    1901 			 * have this bug.
    1902 			 *
    1903 			 * The i8254x apparently doesn't respond when the
    1904 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1905 			 * been configured.
   1906 			 */
   1907 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1908 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1909 				aprint_error_dev(sc->sc_dev,
   1910 				    "WARNING: I/O BAR at zero.\n");
   1911 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1912 					0, &sc->sc_iot, &sc->sc_ioh,
   1913 					NULL, &sc->sc_ios) == 0) {
   1914 				sc->sc_flags |= WM_F_IOH_VALID;
   1915 			} else
   1916 				aprint_error_dev(sc->sc_dev,
   1917 				    "WARNING: unable to map I/O space\n");
   1918 		}
   1919 
   1920 	}
   1921 
   1922 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1923 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1924 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1925 	if (sc->sc_type < WM_T_82542_2_1)
   1926 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1927 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1928 
   1929 	/* Power up chip */
   1930 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1931 	    && error != EOPNOTSUPP) {
   1932 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1933 		return;
   1934 	}
   1935 
   1936 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1937 	/*
    1938 	 * To save interrupt resources, don't use MSI-X if we can
    1939 	 * use only one queue.
   1940 	 */
   1941 	if (sc->sc_nqueues > 1) {
   1942 		max_type = PCI_INTR_TYPE_MSIX;
   1943 		/*
    1944 		 * The 82583 has an MSI-X capability in its PCI configuration
    1945 		 * space, but the chip doesn't support MSI-X; at least the
    1946 		 * documentation doesn't say anything about it.
   1947 		 */
   1948 		counts[PCI_INTR_TYPE_MSIX]
   1949 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1950 	} else {
   1951 		max_type = PCI_INTR_TYPE_MSI;
   1952 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1953 	}
   1954 
   1955 	/* Allocation settings */
   1956 	counts[PCI_INTR_TYPE_MSI] = 1;
   1957 	counts[PCI_INTR_TYPE_INTX] = 1;
   1958 	/* overridden by disable flags */
   1959 	if (wm_disable_msi != 0) {
   1960 		counts[PCI_INTR_TYPE_MSI] = 0;
   1961 		if (wm_disable_msix != 0) {
   1962 			max_type = PCI_INTR_TYPE_INTX;
   1963 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1964 		}
   1965 	} else if (wm_disable_msix != 0) {
   1966 		max_type = PCI_INTR_TYPE_MSI;
   1967 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1968 	}
   1969 
   1970 alloc_retry:
   1971 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1972 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1973 		return;
   1974 	}
   1975 
   1976 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1977 		error = wm_setup_msix(sc);
   1978 		if (error) {
   1979 			pci_intr_release(pc, sc->sc_intrs,
   1980 			    counts[PCI_INTR_TYPE_MSIX]);
   1981 
   1982 			/* Setup for MSI: Disable MSI-X */
   1983 			max_type = PCI_INTR_TYPE_MSI;
   1984 			counts[PCI_INTR_TYPE_MSI] = 1;
   1985 			counts[PCI_INTR_TYPE_INTX] = 1;
   1986 			goto alloc_retry;
   1987 		}
   1988 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1989 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   1990 		error = wm_setup_legacy(sc);
   1991 		if (error) {
   1992 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1993 			    counts[PCI_INTR_TYPE_MSI]);
   1994 
   1995 			/* The next try is for INTx: Disable MSI */
   1996 			max_type = PCI_INTR_TYPE_INTX;
   1997 			counts[PCI_INTR_TYPE_INTX] = 1;
   1998 			goto alloc_retry;
   1999 		}
   2000 	} else {
   2001 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2002 		error = wm_setup_legacy(sc);
   2003 		if (error) {
   2004 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2005 			    counts[PCI_INTR_TYPE_INTX]);
   2006 			return;
   2007 		}
   2008 	}
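	/*
	 * At this point one of MSI-X, MSI or INTx has been established.
	 * The alloc_retry ladder above only ever steps downward
	 * (MSI-X -> MSI -> INTx), releasing the previous allocation
	 * before each retry.
	 */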
   2009 
   2010 	/*
   2011 	 * Check the function ID (unit number of the chip).
   2012 	 */
   2013 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2014 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2015 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2016 	    || (sc->sc_type == WM_T_82580)
   2017 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2018 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2019 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2020 	else
   2021 		sc->sc_funcid = 0;
   2022 
   2023 	/*
   2024 	 * Determine a few things about the bus we're connected to.
   2025 	 */
   2026 	if (sc->sc_type < WM_T_82543) {
   2027 		/* We don't really know the bus characteristics here. */
   2028 		sc->sc_bus_speed = 33;
   2029 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2030 		/*
    2031 		 * CSA (Communication Streaming Architecture) is about as fast
    2032 		 * as a 32-bit 66MHz PCI bus.
   2033 		 */
   2034 		sc->sc_flags |= WM_F_CSA;
   2035 		sc->sc_bus_speed = 66;
   2036 		aprint_verbose_dev(sc->sc_dev,
   2037 		    "Communication Streaming Architecture\n");
   2038 		if (sc->sc_type == WM_T_82547) {
   2039 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2040 			callout_setfunc(&sc->sc_txfifo_ch,
   2041 			    wm_82547_txfifo_stall, sc);
   2042 			aprint_verbose_dev(sc->sc_dev,
   2043 			    "using 82547 Tx FIFO stall work-around\n");
   2044 		}
   2045 	} else if (sc->sc_type >= WM_T_82571) {
   2046 		sc->sc_flags |= WM_F_PCIE;
   2047 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2048 		    && (sc->sc_type != WM_T_ICH10)
   2049 		    && (sc->sc_type != WM_T_PCH)
   2050 		    && (sc->sc_type != WM_T_PCH2)
   2051 		    && (sc->sc_type != WM_T_PCH_LPT)
   2052 		    && (sc->sc_type != WM_T_PCH_SPT)
   2053 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2054 			/* ICH* and PCH* have no PCIe capability registers */
   2055 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2056 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2057 				NULL) == 0)
   2058 				aprint_error_dev(sc->sc_dev,
   2059 				    "unable to find PCIe capability\n");
   2060 		}
   2061 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2062 	} else {
   2063 		reg = CSR_READ(sc, WMREG_STATUS);
   2064 		if (reg & STATUS_BUS64)
   2065 			sc->sc_flags |= WM_F_BUS64;
   2066 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2067 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2068 
   2069 			sc->sc_flags |= WM_F_PCIX;
   2070 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2071 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2072 				aprint_error_dev(sc->sc_dev,
   2073 				    "unable to find PCIX capability\n");
   2074 			else if (sc->sc_type != WM_T_82545_3 &&
   2075 				 sc->sc_type != WM_T_82546_3) {
   2076 				/*
   2077 				 * Work around a problem caused by the BIOS
   2078 				 * setting the max memory read byte count
   2079 				 * incorrectly.
   2080 				 */
   2081 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2082 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2083 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2084 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2085 
   2086 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2087 				    PCIX_CMD_BYTECNT_SHIFT;
   2088 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2089 				    PCIX_STATUS_MAXB_SHIFT;
   2090 				if (bytecnt > maxb) {
   2091 					aprint_verbose_dev(sc->sc_dev,
   2092 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2093 					    512 << bytecnt, 512 << maxb);
   2094 					pcix_cmd = (pcix_cmd &
   2095 					    ~PCIX_CMD_BYTECNT_MASK) |
   2096 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2097 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2098 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2099 					    pcix_cmd);
   2100 				}
   2101 			}
   2102 		}
   2103 		/*
   2104 		 * The quad port adapter is special; it has a PCIX-PCIX
   2105 		 * bridge on the board, and can run the secondary bus at
   2106 		 * a higher speed.
   2107 		 */
   2108 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2109 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2110 								      : 66;
   2111 		} else if (sc->sc_flags & WM_F_PCIX) {
   2112 			switch (reg & STATUS_PCIXSPD_MASK) {
   2113 			case STATUS_PCIXSPD_50_66:
   2114 				sc->sc_bus_speed = 66;
   2115 				break;
   2116 			case STATUS_PCIXSPD_66_100:
   2117 				sc->sc_bus_speed = 100;
   2118 				break;
   2119 			case STATUS_PCIXSPD_100_133:
   2120 				sc->sc_bus_speed = 133;
   2121 				break;
   2122 			default:
   2123 				aprint_error_dev(sc->sc_dev,
   2124 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2125 				    reg & STATUS_PCIXSPD_MASK);
   2126 				sc->sc_bus_speed = 66;
   2127 				break;
   2128 			}
   2129 		} else
   2130 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2131 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2132 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2133 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2134 	}
   2135 
   2136 	/* clear interesting stat counters */
   2137 	CSR_READ(sc, WMREG_COLC);
   2138 	CSR_READ(sc, WMREG_RXERRC);
   2139 
   2140 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2141 	    || (sc->sc_type >= WM_T_ICH8))
   2142 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2143 	if (sc->sc_type >= WM_T_ICH8)
   2144 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2145 
   2146 	/* Set PHY, NVM mutex related stuff */
   2147 	switch (sc->sc_type) {
   2148 	case WM_T_82542_2_0:
   2149 	case WM_T_82542_2_1:
   2150 	case WM_T_82543:
   2151 	case WM_T_82544:
   2152 		/* Microwire */
   2153 		sc->nvm.read = wm_nvm_read_uwire;
   2154 		sc->sc_nvm_wordsize = 64;
   2155 		sc->sc_nvm_addrbits = 6;
   2156 		break;
   2157 	case WM_T_82540:
   2158 	case WM_T_82545:
   2159 	case WM_T_82545_3:
   2160 	case WM_T_82546:
   2161 	case WM_T_82546_3:
   2162 		/* Microwire */
   2163 		sc->nvm.read = wm_nvm_read_uwire;
   2164 		reg = CSR_READ(sc, WMREG_EECD);
   2165 		if (reg & EECD_EE_SIZE) {
   2166 			sc->sc_nvm_wordsize = 256;
   2167 			sc->sc_nvm_addrbits = 8;
   2168 		} else {
   2169 			sc->sc_nvm_wordsize = 64;
   2170 			sc->sc_nvm_addrbits = 6;
   2171 		}
   2172 		sc->sc_flags |= WM_F_LOCK_EECD;
   2173 		sc->nvm.acquire = wm_get_eecd;
   2174 		sc->nvm.release = wm_put_eecd;
   2175 		break;
   2176 	case WM_T_82541:
   2177 	case WM_T_82541_2:
   2178 	case WM_T_82547:
   2179 	case WM_T_82547_2:
   2180 		reg = CSR_READ(sc, WMREG_EECD);
   2181 		/*
    2182 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2183 		 * 8254[17], so set the flags and functions before calling it.
   2184 		 */
   2185 		sc->sc_flags |= WM_F_LOCK_EECD;
   2186 		sc->nvm.acquire = wm_get_eecd;
   2187 		sc->nvm.release = wm_put_eecd;
   2188 		if (reg & EECD_EE_TYPE) {
   2189 			/* SPI */
   2190 			sc->nvm.read = wm_nvm_read_spi;
   2191 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2192 			wm_nvm_set_addrbits_size_eecd(sc);
   2193 		} else {
   2194 			/* Microwire */
   2195 			sc->nvm.read = wm_nvm_read_uwire;
   2196 			if ((reg & EECD_EE_ABITS) != 0) {
   2197 				sc->sc_nvm_wordsize = 256;
   2198 				sc->sc_nvm_addrbits = 8;
   2199 			} else {
   2200 				sc->sc_nvm_wordsize = 64;
   2201 				sc->sc_nvm_addrbits = 6;
   2202 			}
   2203 		}
   2204 		break;
   2205 	case WM_T_82571:
   2206 	case WM_T_82572:
   2207 		/* SPI */
   2208 		sc->nvm.read = wm_nvm_read_eerd;
    2209 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2210 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2211 		wm_nvm_set_addrbits_size_eecd(sc);
   2212 		sc->phy.acquire = wm_get_swsm_semaphore;
   2213 		sc->phy.release = wm_put_swsm_semaphore;
   2214 		sc->nvm.acquire = wm_get_nvm_82571;
   2215 		sc->nvm.release = wm_put_nvm_82571;
   2216 		break;
   2217 	case WM_T_82573:
   2218 	case WM_T_82574:
   2219 	case WM_T_82583:
   2220 		sc->nvm.read = wm_nvm_read_eerd;
    2221 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2222 		if (sc->sc_type == WM_T_82573) {
   2223 			sc->phy.acquire = wm_get_swsm_semaphore;
   2224 			sc->phy.release = wm_put_swsm_semaphore;
   2225 			sc->nvm.acquire = wm_get_nvm_82571;
   2226 			sc->nvm.release = wm_put_nvm_82571;
   2227 		} else {
   2228 			/* Both PHY and NVM use the same semaphore. */
   2229 			sc->phy.acquire = sc->nvm.acquire
   2230 			    = wm_get_swfwhw_semaphore;
   2231 			sc->phy.release = sc->nvm.release
   2232 			    = wm_put_swfwhw_semaphore;
   2233 		}
   2234 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2235 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2236 			sc->sc_nvm_wordsize = 2048;
   2237 		} else {
   2238 			/* SPI */
   2239 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2240 			wm_nvm_set_addrbits_size_eecd(sc);
   2241 		}
   2242 		break;
   2243 	case WM_T_82575:
   2244 	case WM_T_82576:
   2245 	case WM_T_82580:
   2246 	case WM_T_I350:
   2247 	case WM_T_I354:
   2248 	case WM_T_80003:
   2249 		/* SPI */
   2250 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2251 		wm_nvm_set_addrbits_size_eecd(sc);
   2252 		if ((sc->sc_type == WM_T_80003)
   2253 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2254 			sc->nvm.read = wm_nvm_read_eerd;
   2255 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2256 		} else {
   2257 			sc->nvm.read = wm_nvm_read_spi;
   2258 			sc->sc_flags |= WM_F_LOCK_EECD;
   2259 		}
   2260 		sc->phy.acquire = wm_get_phy_82575;
   2261 		sc->phy.release = wm_put_phy_82575;
   2262 		sc->nvm.acquire = wm_get_nvm_80003;
   2263 		sc->nvm.release = wm_put_nvm_80003;
   2264 		break;
   2265 	case WM_T_ICH8:
   2266 	case WM_T_ICH9:
   2267 	case WM_T_ICH10:
   2268 	case WM_T_PCH:
   2269 	case WM_T_PCH2:
   2270 	case WM_T_PCH_LPT:
   2271 		sc->nvm.read = wm_nvm_read_ich8;
   2272 		/* FLASH */
   2273 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2274 		sc->sc_nvm_wordsize = 2048;
    2275 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   2276 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2277 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2278 			aprint_error_dev(sc->sc_dev,
   2279 			    "can't map FLASH registers\n");
   2280 			goto out;
   2281 		}
   2282 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2283 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2284 		    ICH_FLASH_SECTOR_SIZE;
   2285 		sc->sc_ich8_flash_bank_size =
   2286 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2287 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2288 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2289 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
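		/*
		 * Worked example, assuming the usual 4 KiB
		 * ICH_FLASH_SECTOR_SIZE: GFPREG == 0x00070001 puts the
		 * flash base at sector 1 (4 KiB) and spans 8 - 1 = 7
		 * sectors (28 KiB) of NVM, i.e. two banks of
		 * 28672 / 4 = 7168 16-bit words each.
		 */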
   2290 		sc->sc_flashreg_offset = 0;
   2291 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2292 		sc->phy.release = wm_put_swflag_ich8lan;
   2293 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2294 		sc->nvm.release = wm_put_nvm_ich8lan;
   2295 		break;
   2296 	case WM_T_PCH_SPT:
   2297 	case WM_T_PCH_CNP:
   2298 		sc->nvm.read = wm_nvm_read_spt;
   2299 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2300 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2301 		sc->sc_flasht = sc->sc_st;
   2302 		sc->sc_flashh = sc->sc_sh;
   2303 		sc->sc_ich8_flash_base = 0;
   2304 		sc->sc_nvm_wordsize =
   2305 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2306 		    * NVM_SIZE_MULTIPLIER;
    2307 		/* The size is in bytes; we want words */
   2308 		sc->sc_nvm_wordsize /= 2;
   2309 		/* Assume 2 banks */
   2310 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
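		/*
		 * Worked example, assuming NVM_SIZE_MULTIPLIER == 4096:
		 * a STRAP size field of 7 gives (7 + 1) * 4096 = 32768
		 * bytes, i.e. 16384 words, so each of the two assumed
		 * banks holds 8192 words.
		 */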
   2311 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2312 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2313 		sc->phy.release = wm_put_swflag_ich8lan;
   2314 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2315 		sc->nvm.release = wm_put_nvm_ich8lan;
   2316 		break;
   2317 	case WM_T_I210:
   2318 	case WM_T_I211:
    2319 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2320 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2321 		if (wm_nvm_flash_presence_i210(sc)) {
   2322 			sc->nvm.read = wm_nvm_read_eerd;
   2323 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2324 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2325 			wm_nvm_set_addrbits_size_eecd(sc);
   2326 		} else {
   2327 			sc->nvm.read = wm_nvm_read_invm;
   2328 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2329 			sc->sc_nvm_wordsize = INVM_SIZE;
   2330 		}
   2331 		sc->phy.acquire = wm_get_phy_82575;
   2332 		sc->phy.release = wm_put_phy_82575;
   2333 		sc->nvm.acquire = wm_get_nvm_80003;
   2334 		sc->nvm.release = wm_put_nvm_80003;
   2335 		break;
   2336 	default:
   2337 		break;
   2338 	}
   2339 
   2340 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2341 	switch (sc->sc_type) {
   2342 	case WM_T_82571:
   2343 	case WM_T_82572:
   2344 		reg = CSR_READ(sc, WMREG_SWSM2);
   2345 		if ((reg & SWSM2_LOCK) == 0) {
   2346 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2347 			force_clear_smbi = true;
   2348 		} else
   2349 			force_clear_smbi = false;
   2350 		break;
   2351 	case WM_T_82573:
   2352 	case WM_T_82574:
   2353 	case WM_T_82583:
   2354 		force_clear_smbi = true;
   2355 		break;
   2356 	default:
   2357 		force_clear_smbi = false;
   2358 		break;
   2359 	}
   2360 	if (force_clear_smbi) {
   2361 		reg = CSR_READ(sc, WMREG_SWSM);
   2362 		if ((reg & SWSM_SMBI) != 0)
   2363 			aprint_error_dev(sc->sc_dev,
   2364 			    "Please update the Bootagent\n");
   2365 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2366 	}
   2367 
   2368 	/*
    2369 	 * Defer printing the EEPROM type until after verifying the checksum.
   2370 	 * This allows the EEPROM type to be printed correctly in the case
   2371 	 * that no EEPROM is attached.
   2372 	 */
   2373 	/*
   2374 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2375 	 * this for later, so we can fail future reads from the EEPROM.
   2376 	 */
   2377 	if (wm_nvm_validate_checksum(sc)) {
   2378 		/*
    2379 		 * Check it again, because some PCI-e parts fail the
    2380 		 * first check due to the link being in a sleep state.
   2381 		 */
   2382 		if (wm_nvm_validate_checksum(sc))
   2383 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2384 	}
   2385 
   2386 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2387 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2388 	else {
   2389 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2390 		    sc->sc_nvm_wordsize);
   2391 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2392 			aprint_verbose("iNVM");
   2393 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2394 			aprint_verbose("FLASH(HW)");
   2395 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2396 			aprint_verbose("FLASH");
   2397 		else {
   2398 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2399 				eetype = "SPI";
   2400 			else
   2401 				eetype = "MicroWire";
   2402 			aprint_verbose("(%d address bits) %s EEPROM",
   2403 			    sc->sc_nvm_addrbits, eetype);
   2404 		}
   2405 	}
   2406 	wm_nvm_version(sc);
   2407 	aprint_verbose("\n");
   2408 
   2409 	/*
    2410 	 * XXX The first call to wm_gmii_setup_phytype(). The result might be
   2411 	 * incorrect.
   2412 	 */
   2413 	wm_gmii_setup_phytype(sc, 0, 0);
   2414 
   2415 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2416 	switch (sc->sc_type) {
   2417 	case WM_T_ICH8:
   2418 	case WM_T_ICH9:
   2419 	case WM_T_ICH10:
   2420 	case WM_T_PCH:
   2421 	case WM_T_PCH2:
   2422 	case WM_T_PCH_LPT:
   2423 	case WM_T_PCH_SPT:
   2424 	case WM_T_PCH_CNP:
   2425 		apme_mask = WUC_APME;
   2426 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2427 		if ((eeprom_data & apme_mask) != 0)
   2428 			sc->sc_flags |= WM_F_WOL;
   2429 		break;
   2430 	default:
   2431 		break;
   2432 	}
   2433 
   2434 	/* Reset the chip to a known state. */
   2435 	wm_reset(sc);
   2436 
   2437 	/*
   2438 	 * Check for I21[01] PLL workaround.
   2439 	 *
   2440 	 * Three cases:
   2441 	 * a) Chip is I211.
   2442 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2443 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2444 	 */
   2445 	if (sc->sc_type == WM_T_I211)
   2446 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2447 	if (sc->sc_type == WM_T_I210) {
   2448 		if (!wm_nvm_flash_presence_i210(sc))
   2449 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2450 		else if ((sc->sc_nvm_ver_major < 3)
   2451 		    || ((sc->sc_nvm_ver_major == 3)
   2452 			&& (sc->sc_nvm_ver_minor < 25))) {
   2453 			aprint_verbose_dev(sc->sc_dev,
   2454 			    "ROM image version %d.%d is older than 3.25\n",
   2455 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2456 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2457 		}
   2458 	}
   2459 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2460 		wm_pll_workaround_i210(sc);
   2461 
   2462 	wm_get_wakeup(sc);
   2463 
   2464 	/* Non-AMT based hardware can now take control from firmware */
   2465 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2466 		wm_get_hw_control(sc);
   2467 
   2468 	/*
   2469 	 * Read the Ethernet address from the EEPROM, if not first found
   2470 	 * in device properties.
   2471 	 */
   2472 	ea = prop_dictionary_get(dict, "mac-address");
   2473 	if (ea != NULL) {
   2474 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2475 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2476 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2477 	} else {
   2478 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2479 			aprint_error_dev(sc->sc_dev,
   2480 			    "unable to read Ethernet address\n");
   2481 			goto out;
   2482 		}
   2483 	}
   2484 
   2485 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2486 	    ether_sprintf(enaddr));
   2487 
   2488 	/*
   2489 	 * Read the config info from the EEPROM, and set up various
   2490 	 * bits in the control registers based on their contents.
   2491 	 */
   2492 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2493 	if (pn != NULL) {
   2494 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2495 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2496 	} else {
   2497 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2498 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2499 			goto out;
   2500 		}
   2501 	}
   2502 
   2503 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2504 	if (pn != NULL) {
   2505 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2506 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2507 	} else {
   2508 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2509 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2510 			goto out;
   2511 		}
   2512 	}
   2513 
   2514 	/* check for WM_F_WOL */
   2515 	switch (sc->sc_type) {
   2516 	case WM_T_82542_2_0:
   2517 	case WM_T_82542_2_1:
   2518 	case WM_T_82543:
   2519 		/* dummy? */
   2520 		eeprom_data = 0;
   2521 		apme_mask = NVM_CFG3_APME;
   2522 		break;
   2523 	case WM_T_82544:
   2524 		apme_mask = NVM_CFG2_82544_APM_EN;
   2525 		eeprom_data = cfg2;
   2526 		break;
   2527 	case WM_T_82546:
   2528 	case WM_T_82546_3:
   2529 	case WM_T_82571:
   2530 	case WM_T_82572:
   2531 	case WM_T_82573:
   2532 	case WM_T_82574:
   2533 	case WM_T_82583:
   2534 	case WM_T_80003:
   2535 	case WM_T_82575:
   2536 	case WM_T_82576:
   2537 		apme_mask = NVM_CFG3_APME;
   2538 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2539 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2540 		break;
   2541 	case WM_T_82580:
   2542 	case WM_T_I350:
   2543 	case WM_T_I354:
   2544 	case WM_T_I210:
   2545 	case WM_T_I211:
   2546 		apme_mask = NVM_CFG3_APME;
   2547 		wm_nvm_read(sc,
   2548 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2549 		    1, &eeprom_data);
   2550 		break;
   2551 	case WM_T_ICH8:
   2552 	case WM_T_ICH9:
   2553 	case WM_T_ICH10:
   2554 	case WM_T_PCH:
   2555 	case WM_T_PCH2:
   2556 	case WM_T_PCH_LPT:
   2557 	case WM_T_PCH_SPT:
   2558 	case WM_T_PCH_CNP:
    2559 		/* Already checked before wm_reset() */
   2560 		apme_mask = eeprom_data = 0;
   2561 		break;
   2562 	default: /* XXX 82540 */
   2563 		apme_mask = NVM_CFG3_APME;
   2564 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2565 		break;
   2566 	}
   2567 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2568 	if ((eeprom_data & apme_mask) != 0)
   2569 		sc->sc_flags |= WM_F_WOL;
   2570 
   2571 	/*
    2572 	 * We have the EEPROM settings; now apply the special cases
    2573 	 * where the EEPROM may be wrong or the board doesn't support
    2574 	 * wake on LAN on a particular port.
   2575 	 */
   2576 	switch (sc->sc_pcidevid) {
   2577 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2578 		sc->sc_flags &= ~WM_F_WOL;
   2579 		break;
   2580 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2581 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2582 		/* Wake events are only supported on port A of dual-fiber
    2583 		 * adapters, regardless of the EEPROM setting. */
   2584 		if (sc->sc_funcid == 1)
   2585 			sc->sc_flags &= ~WM_F_WOL;
   2586 		break;
   2587 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2588 		/* If quad port adapter, disable WoL on all but port A */
   2589 		if (sc->sc_funcid != 0)
   2590 			sc->sc_flags &= ~WM_F_WOL;
   2591 		break;
   2592 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2593 		/* Wake events are only supported on port A of dual-fiber
    2594 		 * adapters, regardless of the EEPROM setting. */
   2595 		if (sc->sc_funcid == 1)
   2596 			sc->sc_flags &= ~WM_F_WOL;
   2597 		break;
   2598 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2599 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2600 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2601 		/* If quad port adapter, disable WoL on all but port A */
   2602 		if (sc->sc_funcid != 0)
   2603 			sc->sc_flags &= ~WM_F_WOL;
   2604 		break;
   2605 	}
   2606 
   2607 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2608 		/* Check NVM for autonegotiation */
   2609 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2610 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2611 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2612 		}
   2613 	}
   2614 
   2615 	/*
    2616 	 * XXX Some multi-port cards need special handling to
    2617 	 * disable a particular port.
   2618 	 */
   2619 
   2620 	if (sc->sc_type >= WM_T_82544) {
   2621 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2622 		if (pn != NULL) {
   2623 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2624 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2625 		} else {
   2626 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2627 				aprint_error_dev(sc->sc_dev,
   2628 				    "unable to read SWDPIN\n");
   2629 				goto out;
   2630 			}
   2631 		}
   2632 	}
   2633 
   2634 	if (cfg1 & NVM_CFG1_ILOS)
   2635 		sc->sc_ctrl |= CTRL_ILOS;
   2636 
   2637 	/*
   2638 	 * XXX
    2639 	 * This code isn't correct because pins 2 and 3 are located
    2640 	 * at different positions on newer chips. Check all datasheets.
    2641 	 *
    2642 	 * Until this problem is resolved, only run it on chips <= 82580.
   2643 	 */
   2644 	if (sc->sc_type <= WM_T_82580) {
   2645 		if (sc->sc_type >= WM_T_82544) {
   2646 			sc->sc_ctrl |=
   2647 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2648 			    CTRL_SWDPIO_SHIFT;
   2649 			sc->sc_ctrl |=
   2650 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2651 			    CTRL_SWDPINS_SHIFT;
   2652 		} else {
   2653 			sc->sc_ctrl |=
   2654 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2655 			    CTRL_SWDPIO_SHIFT;
   2656 		}
   2657 	}
   2658 
   2659 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2660 		wm_nvm_read(sc,
   2661 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2662 		    1, &nvmword);
   2663 		if (nvmword & NVM_CFG3_ILOS)
   2664 			sc->sc_ctrl |= CTRL_ILOS;
   2665 	}
   2666 
   2667 #if 0
   2668 	if (sc->sc_type >= WM_T_82544) {
   2669 		if (cfg1 & NVM_CFG1_IPS0)
   2670 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2671 		if (cfg1 & NVM_CFG1_IPS1)
   2672 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2673 		sc->sc_ctrl_ext |=
   2674 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2675 		    CTRL_EXT_SWDPIO_SHIFT;
   2676 		sc->sc_ctrl_ext |=
   2677 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2678 		    CTRL_EXT_SWDPINS_SHIFT;
   2679 	} else {
   2680 		sc->sc_ctrl_ext |=
   2681 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2682 		    CTRL_EXT_SWDPIO_SHIFT;
   2683 	}
   2684 #endif
   2685 
   2686 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2687 #if 0
   2688 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2689 #endif
   2690 
   2691 	if (sc->sc_type == WM_T_PCH) {
   2692 		uint16_t val;
   2693 
   2694 		/* Save the NVM K1 bit setting */
   2695 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2696 
   2697 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2698 			sc->sc_nvm_k1_enabled = 1;
   2699 		else
   2700 			sc->sc_nvm_k1_enabled = 0;
   2701 	}
   2702 
   2703 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2704 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2705 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2706 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2707 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2708 	    || sc->sc_type == WM_T_82573
   2709 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2710 		/* Copper only */
   2711 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2712 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2713 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2714 	    || (sc->sc_type == WM_T_I211)) {
   2715 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2716 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2717 		switch (link_mode) {
   2718 		case CTRL_EXT_LINK_MODE_1000KX:
   2719 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2720 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2721 			break;
   2722 		case CTRL_EXT_LINK_MODE_SGMII:
   2723 			if (wm_sgmii_uses_mdio(sc)) {
   2724 				aprint_verbose_dev(sc->sc_dev,
   2725 				    "SGMII(MDIO)\n");
   2726 				sc->sc_flags |= WM_F_SGMII;
   2727 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2728 				break;
   2729 			}
   2730 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2731 			/*FALLTHROUGH*/
   2732 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2733 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2734 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2735 				if (link_mode
   2736 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2737 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2738 					sc->sc_flags |= WM_F_SGMII;
   2739 				} else {
   2740 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2741 					aprint_verbose_dev(sc->sc_dev,
   2742 					    "SERDES\n");
   2743 				}
   2744 				break;
   2745 			}
   2746 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2747 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2748 
   2749 			/* Change current link mode setting */
   2750 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2751 			switch (sc->sc_mediatype) {
   2752 			case WM_MEDIATYPE_COPPER:
   2753 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2754 				break;
   2755 			case WM_MEDIATYPE_SERDES:
   2756 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2757 				break;
   2758 			default:
   2759 				break;
   2760 			}
   2761 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2762 			break;
   2763 		case CTRL_EXT_LINK_MODE_GMII:
   2764 		default:
   2765 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2766 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2767 			break;
   2768 		}
   2769 
		reg &= ~CTRL_EXT_I2C_ENA;
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
   2775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2776 	} else if (sc->sc_type < WM_T_82543 ||
   2777 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2778 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2779 			aprint_error_dev(sc->sc_dev,
   2780 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2781 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2782 		}
   2783 	} else {
   2784 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2785 			aprint_error_dev(sc->sc_dev,
   2786 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2787 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2788 		}
   2789 	}
   2790 
   2791 	if (sc->sc_type >= WM_T_PCH2)
   2792 		sc->sc_flags |= WM_F_EEE;
   2793 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2794 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2795 		/* XXX: Need special handling for I354. (not yet) */
   2796 		if (sc->sc_type != WM_T_I354)
   2797 			sc->sc_flags |= WM_F_EEE;
   2798 	}
   2799 
   2800 	/* Set device properties (macflags) */
   2801 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2802 
   2803 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2804 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2805 
   2806 	/* Initialize the media structures accordingly. */
   2807 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2808 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2809 	else
   2810 		wm_tbi_mediainit(sc); /* All others */
   2811 
   2812 	ifp = &sc->sc_ethercom.ec_if;
   2813 	xname = device_xname(sc->sc_dev);
   2814 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2815 	ifp->if_softc = sc;
   2816 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2817 #ifdef WM_MPSAFE
   2818 	ifp->if_extflags = IFEF_MPSAFE;
   2819 #endif
   2820 	ifp->if_ioctl = wm_ioctl;
   2821 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2822 		ifp->if_start = wm_nq_start;
   2823 		/*
		 * When there is only one CPU and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
		 * In this situation, wm_nq_transmit() is disadvantageous
		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2830 		 */
   2831 		if (wm_is_using_multiqueue(sc))
   2832 			ifp->if_transmit = wm_nq_transmit;
   2833 	} else {
   2834 		ifp->if_start = wm_start;
   2835 		/*
		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2837 		 */
   2838 		if (wm_is_using_multiqueue(sc))
   2839 			ifp->if_transmit = wm_transmit;
   2840 	}
	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2842 	ifp->if_init = wm_init;
   2843 	ifp->if_stop = wm_stop;
   2844 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2845 	IFQ_SET_READY(&ifp->if_snd);
   2846 
   2847 	/* Check for jumbo frame */
   2848 	switch (sc->sc_type) {
   2849 	case WM_T_82573:
   2850 		/* XXX limited to 9234 if ASPM is disabled */
   2851 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2852 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2853 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2854 		break;
   2855 	case WM_T_82571:
   2856 	case WM_T_82572:
   2857 	case WM_T_82574:
   2858 	case WM_T_82583:
   2859 	case WM_T_82575:
   2860 	case WM_T_82576:
   2861 	case WM_T_82580:
   2862 	case WM_T_I350:
   2863 	case WM_T_I354:
   2864 	case WM_T_I210:
   2865 	case WM_T_I211:
   2866 	case WM_T_80003:
   2867 	case WM_T_ICH9:
   2868 	case WM_T_ICH10:
   2869 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2870 	case WM_T_PCH_LPT:
   2871 	case WM_T_PCH_SPT:
   2872 	case WM_T_PCH_CNP:
   2873 		/* XXX limited to 9234 */
   2874 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2875 		break;
   2876 	case WM_T_PCH:
   2877 		/* XXX limited to 4096 */
   2878 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2879 		break;
   2880 	case WM_T_82542_2_0:
   2881 	case WM_T_82542_2_1:
   2882 	case WM_T_ICH8:
   2883 		/* No support for jumbo frame */
   2884 		break;
   2885 	default:
   2886 		/* ETHER_MAX_LEN_JUMBO */
   2887 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2888 		break;
   2889 	}
   2890 
	/* If we're an i82543 or greater, we can support VLANs. */
   2892 	if (sc->sc_type >= WM_T_82543) {
   2893 		sc->sc_ethercom.ec_capabilities |=
   2894 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2895 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2896 	}
   2897 
   2898 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2899 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2900 
   2901 	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
   2904 	 */
   2905 	if (sc->sc_type >= WM_T_82543) {
   2906 		ifp->if_capabilities |=
   2907 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2908 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2909 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2910 		    IFCAP_CSUM_TCPv6_Tx |
   2911 		    IFCAP_CSUM_UDPv6_Tx;
   2912 	}
   2913 
   2914 	/*
	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2916 	 *
   2917 	 *	82541GI (8086:1076) ... no
   2918 	 *	82572EI (8086:10b9) ... yes
   2919 	 */
   2920 	if (sc->sc_type >= WM_T_82571) {
   2921 		ifp->if_capabilities |=
   2922 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2923 	}
   2924 
   2925 	/*
	 * If we're an i82544 or greater (except the i82547), we can do
   2927 	 * TCP segmentation offload.
   2928 	 */
   2929 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2930 		ifp->if_capabilities |= IFCAP_TSOv4;
   2931 	}
   2932 
   2933 	if (sc->sc_type >= WM_T_82571) {
   2934 		ifp->if_capabilities |= IFCAP_TSOv6;
   2935 	}
   2936 
   2937 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2938 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2939 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2940 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2941 
   2942 #ifdef WM_MPSAFE
   2943 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2944 #else
   2945 	sc->sc_core_lock = NULL;
   2946 #endif
   2947 
   2948 	/* Attach the interface. */
   2949 	error = if_initialize(ifp);
   2950 	if (error != 0) {
   2951 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2952 		    error);
   2953 		return; /* Error */
   2954 	}
   2955 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2956 	ether_ifattach(ifp, enaddr);
   2957 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2958 	if_register(ifp);
   2959 
   2960 #ifdef WM_EVENT_COUNTERS
   2961 	/* Attach event counters. */
   2962 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2963 	    NULL, xname, "linkintr");
   2964 
   2965 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2966 	    NULL, xname, "tx_xoff");
   2967 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2968 	    NULL, xname, "tx_xon");
   2969 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2970 	    NULL, xname, "rx_xoff");
   2971 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2972 	    NULL, xname, "rx_xon");
   2973 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2974 	    NULL, xname, "rx_macctl");
   2975 #endif /* WM_EVENT_COUNTERS */
   2976 
   2977 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2978 		pmf_class_network_register(self, ifp);
   2979 	else
   2980 		aprint_error_dev(self, "couldn't establish power handler\n");
   2981 
   2982 	sc->sc_flags |= WM_F_ATTACHED;
   2983 out:
   2984 	return;
   2985 }
   2986 
   2987 /* The detach function (ca_detach) */
   2988 static int
   2989 wm_detach(device_t self, int flags __unused)
   2990 {
   2991 	struct wm_softc *sc = device_private(self);
   2992 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2993 	int i;
   2994 
   2995 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2996 		return 0;
   2997 
   2998 	/* Stop the interface. Callouts are stopped in it. */
   2999 	wm_stop(ifp, 1);
   3000 
   3001 	pmf_device_deregister(self);
   3002 
   3003 #ifdef WM_EVENT_COUNTERS
   3004 	evcnt_detach(&sc->sc_ev_linkintr);
   3005 
   3006 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3007 	evcnt_detach(&sc->sc_ev_tx_xon);
   3008 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3009 	evcnt_detach(&sc->sc_ev_rx_xon);
   3010 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3011 #endif /* WM_EVENT_COUNTERS */
   3012 
   3013 	/* Tell the firmware about the release */
   3014 	WM_CORE_LOCK(sc);
   3015 	wm_release_manageability(sc);
   3016 	wm_release_hw_control(sc);
   3017 	wm_enable_wakeup(sc);
   3018 	WM_CORE_UNLOCK(sc);
   3019 
   3020 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3021 
   3022 	/* Delete all remaining media. */
   3023 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3024 
   3025 	ether_ifdetach(ifp);
   3026 	if_detach(ifp);
   3027 	if_percpuq_destroy(sc->sc_ipq);
   3028 
   3029 	/* Unload RX dmamaps and free mbufs */
   3030 	for (i = 0; i < sc->sc_nqueues; i++) {
   3031 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3032 		mutex_enter(rxq->rxq_lock);
   3033 		wm_rxdrain(rxq);
   3034 		mutex_exit(rxq->rxq_lock);
   3035 	}
   3036 	/* Must unlock here */
   3037 
   3038 	/* Disestablish the interrupt handler */
   3039 	for (i = 0; i < sc->sc_nintrs; i++) {
   3040 		if (sc->sc_ihs[i] != NULL) {
   3041 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3042 			sc->sc_ihs[i] = NULL;
   3043 		}
   3044 	}
   3045 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3046 
   3047 	wm_free_txrx_queues(sc);
   3048 
   3049 	/* Unmap the registers */
   3050 	if (sc->sc_ss) {
   3051 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3052 		sc->sc_ss = 0;
   3053 	}
   3054 	if (sc->sc_ios) {
   3055 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3056 		sc->sc_ios = 0;
   3057 	}
   3058 	if (sc->sc_flashs) {
   3059 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3060 		sc->sc_flashs = 0;
   3061 	}
   3062 
   3063 	if (sc->sc_core_lock)
   3064 		mutex_obj_free(sc->sc_core_lock);
   3065 	if (sc->sc_ich_phymtx)
   3066 		mutex_obj_free(sc->sc_ich_phymtx);
   3067 	if (sc->sc_ich_nvmmtx)
   3068 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3069 
   3070 	return 0;
   3071 }
   3072 
   3073 static bool
   3074 wm_suspend(device_t self, const pmf_qual_t *qual)
   3075 {
   3076 	struct wm_softc *sc = device_private(self);
   3077 
   3078 	wm_release_manageability(sc);
   3079 	wm_release_hw_control(sc);
   3080 	wm_enable_wakeup(sc);
   3081 
   3082 	return true;
   3083 }
   3084 
   3085 static bool
   3086 wm_resume(device_t self, const pmf_qual_t *qual)
   3087 {
   3088 	struct wm_softc *sc = device_private(self);
   3089 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3090 	pcireg_t reg;
   3091 	char buf[256];
   3092 
   3093 	reg = CSR_READ(sc, WMREG_WUS);
   3094 	if (reg != 0) {
   3095 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3096 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3097 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3098 	}
   3099 
   3100 	if (sc->sc_type >= WM_T_PCH2)
   3101 		wm_resume_workarounds_pchlan(sc);
   3102 	if ((ifp->if_flags & IFF_UP) == 0) {
   3103 		wm_reset(sc);
   3104 		/* Non-AMT based hardware can now take control from firmware */
   3105 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3106 			wm_get_hw_control(sc);
   3107 		wm_init_manageability(sc);
   3108 	} else {
   3109 		/*
   3110 		 * We called pmf_class_network_register(), so if_init() is
   3111 		 * automatically called when IFF_UP. wm_reset(),
   3112 		 * wm_get_hw_control() and wm_init_manageability() are called
   3113 		 * via wm_init().
   3114 		 */
   3115 	}
   3116 
   3117 	return true;
   3118 }
   3119 
   3120 /*
   3121  * wm_watchdog:		[ifnet interface function]
   3122  *
   3123  *	Watchdog timer handler.
   3124  */
   3125 static void
   3126 wm_watchdog(struct ifnet *ifp)
   3127 {
   3128 	int qid;
   3129 	struct wm_softc *sc = ifp->if_softc;
	/* Bitmask of hung queues; the max queue count in wm(4) is the 82576's 16. */
	uint16_t hang_queue = 0;
   3131 
   3132 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3133 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3134 
   3135 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3136 	}
   3137 
	/* If any queue has hung, reset the interface. */
   3139 	if (hang_queue != 0) {
   3140 		(void)wm_init(ifp);
   3141 
   3142 		/*
		 * Some upper-layer processing (e.g. ALTQ, or single-CPU
		 * systems) still calls ifp->if_start() directly.
   3145 		 */
   3146 		/* Try to get more packets going. */
   3147 		ifp->if_start(ifp);
   3148 	}
   3149 }
   3150 
   3151 
   3152 static void
   3153 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3154 {
   3155 
   3156 	mutex_enter(txq->txq_lock);
   3157 	if (txq->txq_sending &&
   3158 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3159 		wm_watchdog_txq_locked(ifp, txq, hang);
   3160 
   3161 	mutex_exit(txq->txq_lock);
   3162 }
   3163 
   3164 static void
   3165 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3166     uint16_t *hang)
   3167 {
   3168 	struct wm_softc *sc = ifp->if_softc;
   3169 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3170 
   3171 	KASSERT(mutex_owned(txq->txq_lock));
   3172 
   3173 	/*
   3174 	 * Since we're using delayed interrupts, sweep up
   3175 	 * before we report an error.
   3176 	 */
   3177 	wm_txeof(txq, UINT_MAX);
   3178 
   3179 	if (txq->txq_sending)
   3180 		*hang |= __BIT(wmq->wmq_id);
   3181 
   3182 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3183 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3184 		    device_xname(sc->sc_dev));
   3185 	} else {
   3186 #ifdef WM_DEBUG
   3187 		int i, j;
   3188 		struct wm_txsoft *txs;
   3189 #endif
   3190 		log(LOG_ERR,
   3191 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3192 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3193 		    txq->txq_next);
   3194 		ifp->if_oerrors++;
   3195 #ifdef WM_DEBUG
   3196 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3197 		    i = WM_NEXTTXS(txq, i)) {
   3198 			txs = &txq->txq_soft[i];
   3199 			printf("txs %d tx %d -> %d\n",
   3200 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3201 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3202 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3203 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3204 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3205 					printf("\t %#08x%08x\n",
   3206 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3207 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3208 				} else {
   3209 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3210 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3211 					    txq->txq_descs[j].wtx_addr.wa_low);
   3212 					printf("\t %#04x%02x%02x%08x\n",
   3213 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3214 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3215 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3216 					    txq->txq_descs[j].wtx_cmdlen);
   3217 				}
   3218 				if (j == txs->txs_lastdesc)
   3219 					break;
   3220 			}
   3221 		}
   3222 #endif
   3223 	}
   3224 }
   3225 
   3226 /*
   3227  * wm_tick:
   3228  *
   3229  *	One second timer, used to check link status, sweep up
   3230  *	completed transmit jobs, etc.
   3231  */
   3232 static void
   3233 wm_tick(void *arg)
   3234 {
   3235 	struct wm_softc *sc = arg;
   3236 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3237 #ifndef WM_MPSAFE
   3238 	int s = splnet();
   3239 #endif
   3240 
   3241 	WM_CORE_LOCK(sc);
   3242 
   3243 	if (sc->sc_core_stopping) {
   3244 		WM_CORE_UNLOCK(sc);
   3245 #ifndef WM_MPSAFE
   3246 		splx(s);
   3247 #endif
   3248 		return;
   3249 	}
   3250 
   3251 	if (sc->sc_type >= WM_T_82542_2_1) {
   3252 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3253 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3254 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3255 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3256 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3257 	}
   3258 
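	/*
	 * The chip's statistics registers are clear-on-read, so each
	 * read here yields the count accumulated since the last tick.
	 */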
   3259 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3260 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3261 	    + CSR_READ(sc, WMREG_CRCERRS)
   3262 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3263 	    + CSR_READ(sc, WMREG_SYMERRC)
   3264 	    + CSR_READ(sc, WMREG_RXERRC)
   3265 	    + CSR_READ(sc, WMREG_SEC)
   3266 	    + CSR_READ(sc, WMREG_CEXTERR)
   3267 	    + CSR_READ(sc, WMREG_RLEC);
   3268 	/*
	 * WMREG_RNBC is incremented when there are no available buffers in
	 * host memory. It is not a count of dropped packets, because the
	 * Ethernet controller can still receive packets in that case as
	 * long as there is space in the PHY's FIFO.
	 *
	 * To track WMREG_RNBC itself, use a dedicated EVCNT rather than
	 * if_iqdrops.
   3276 	 */
   3277 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3278 
   3279 	if (sc->sc_flags & WM_F_HAS_MII)
   3280 		mii_tick(&sc->sc_mii);
   3281 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3282 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3283 		wm_serdes_tick(sc);
   3284 	else
   3285 		wm_tbi_tick(sc);
   3286 
   3287 	WM_CORE_UNLOCK(sc);
   3288 
   3289 	wm_watchdog(ifp);
   3290 
   3291 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3292 }
   3293 
   3294 static int
   3295 wm_ifflags_cb(struct ethercom *ec)
   3296 {
   3297 	struct ifnet *ifp = &ec->ec_if;
   3298 	struct wm_softc *sc = ifp->if_softc;
   3299 	u_short iffchange;
   3300 	int ecchange;
   3301 	bool needreset = false;
   3302 	int rc = 0;
   3303 
   3304 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3305 		device_xname(sc->sc_dev), __func__));
   3306 
   3307 	WM_CORE_LOCK(sc);
   3308 
   3309 	/*
   3310 	 * Check for if_flags.
	 * The main purpose is to prevent link-down when opening bpf.
   3312 	 */
   3313 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3314 	sc->sc_if_flags = ifp->if_flags;
   3315 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3316 		needreset = true;
   3317 		goto ec;
   3318 	}
   3319 
   3320 	/* iff related updates */
   3321 	if ((iffchange & IFF_PROMISC) != 0)
   3322 		wm_set_filter(sc);
   3323 
   3324 	wm_set_vlan(sc);
   3325 
   3326 ec:
   3327 	/* Check for ec_capenable. */
   3328 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3329 	sc->sc_ec_capenable = ec->ec_capenable;
   3330 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3331 		needreset = true;
   3332 		goto out;
   3333 	}
   3334 
   3335 	/* ec related updates */
   3336 	wm_set_eee(sc);
   3337 
   3338 out:
   3339 	if (needreset)
   3340 		rc = ENETRESET;
   3341 	WM_CORE_UNLOCK(sc);
   3342 
   3343 	return rc;
   3344 }
   3345 
   3346 /*
   3347  * wm_ioctl:		[ifnet interface function]
   3348  *
   3349  *	Handle control requests from the operator.
   3350  */
   3351 static int
   3352 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3353 {
   3354 	struct wm_softc *sc = ifp->if_softc;
   3355 	struct ifreq *ifr = (struct ifreq *)data;
   3356 	struct ifaddr *ifa = (struct ifaddr *)data;
   3357 	struct sockaddr_dl *sdl;
   3358 	int s, error;
   3359 
   3360 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3361 		device_xname(sc->sc_dev), __func__));
   3362 
   3363 #ifndef WM_MPSAFE
   3364 	s = splnet();
   3365 #endif
   3366 	switch (cmd) {
   3367 	case SIOCSIFMEDIA:
   3368 		WM_CORE_LOCK(sc);
   3369 		/* Flow control requires full-duplex mode. */
   3370 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3371 		    (ifr->ifr_media & IFM_FDX) == 0)
   3372 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3373 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3374 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3375 				/* We can do both TXPAUSE and RXPAUSE. */
   3376 				ifr->ifr_media |=
   3377 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3378 			}
   3379 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3380 		}
   3381 		WM_CORE_UNLOCK(sc);
   3382 #ifdef WM_MPSAFE
   3383 		s = splnet();
   3384 #endif
   3385 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3386 #ifdef WM_MPSAFE
   3387 		splx(s);
   3388 #endif
   3389 		break;
   3390 	case SIOCINITIFADDR:
   3391 		WM_CORE_LOCK(sc);
   3392 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3393 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3394 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3395 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
			/* The unicast address is the first entry of the receive address list */
   3397 			wm_set_filter(sc);
   3398 			error = 0;
   3399 			WM_CORE_UNLOCK(sc);
   3400 			break;
   3401 		}
   3402 		WM_CORE_UNLOCK(sc);
   3403 		/*FALLTHROUGH*/
   3404 	default:
   3405 #ifdef WM_MPSAFE
   3406 		s = splnet();
   3407 #endif
		/* This may call wm_start(), so don't hold the core lock here */
   3409 		error = ether_ioctl(ifp, cmd, data);
   3410 #ifdef WM_MPSAFE
   3411 		splx(s);
   3412 #endif
   3413 		if (error != ENETRESET)
   3414 			break;
   3415 
   3416 		error = 0;
   3417 
   3418 		if (cmd == SIOCSIFCAP)
   3419 			error = (*ifp->if_init)(ifp);
   3420 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3421 			;
   3422 		else if (ifp->if_flags & IFF_RUNNING) {
   3423 			/*
   3424 			 * Multicast list has changed; set the hardware filter
   3425 			 * accordingly.
   3426 			 */
   3427 			WM_CORE_LOCK(sc);
   3428 			wm_set_filter(sc);
   3429 			WM_CORE_UNLOCK(sc);
   3430 		}
   3431 		break;
   3432 	}
   3433 
   3434 #ifndef WM_MPSAFE
   3435 	splx(s);
   3436 #endif
   3437 	return error;
   3438 }
   3439 
   3440 /* MAC address related */
   3441 
   3442 /*
 * Get the offset of the MAC address and return it.
 * If an error occurs, use offset 0.
   3445  */
   3446 static uint16_t
   3447 wm_check_alt_mac_addr(struct wm_softc *sc)
   3448 {
   3449 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3450 	uint16_t offset = NVM_OFF_MACADDR;
   3451 
   3452 	/* Try to read alternative MAC address pointer */
   3453 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3454 		return 0;
   3455 
	/* Check whether the pointer is valid. */
   3457 	if ((offset == 0x0000) || (offset == 0xffff))
   3458 		return 0;
   3459 
   3460 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3461 	/*
	 * Check whether the alternative MAC address is valid.
	 * Some cards have a non-0xffff pointer but don't actually use
	 * an alternative MAC address.
	 *
	 * A valid unicast address has the multicast (group) bit clear.
   3467 	 */
   3468 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3469 		if (((myea[0] & 0xff) & 0x01) == 0)
   3470 			return offset; /* Found */
   3471 
   3472 	/* Not found */
   3473 	return 0;
   3474 }
   3475 
   3476 static int
   3477 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3478 {
   3479 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3480 	uint16_t offset = NVM_OFF_MACADDR;
   3481 	int do_invert = 0;
   3482 
   3483 	switch (sc->sc_type) {
   3484 	case WM_T_82580:
   3485 	case WM_T_I350:
   3486 	case WM_T_I354:
   3487 		/* EEPROM Top Level Partitioning */
   3488 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3489 		break;
   3490 	case WM_T_82571:
   3491 	case WM_T_82575:
   3492 	case WM_T_82576:
   3493 	case WM_T_80003:
   3494 	case WM_T_I210:
   3495 	case WM_T_I211:
   3496 		offset = wm_check_alt_mac_addr(sc);
   3497 		if (offset == 0)
   3498 			if ((sc->sc_funcid & 0x01) == 1)
   3499 				do_invert = 1;
   3500 		break;
   3501 	default:
   3502 		if ((sc->sc_funcid & 0x01) == 1)
   3503 			do_invert = 1;
   3504 		break;
   3505 	}
   3506 
   3507 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3508 		goto bad;
   3509 
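	/*
	 * The NVM stores the station address as three little-endian
	 * 16-bit words; for example (illustrative), myea[0] == 0x2301
	 * unpacks to enaddr[0] == 0x01 and enaddr[1] == 0x23.
	 */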
   3510 	enaddr[0] = myea[0] & 0xff;
   3511 	enaddr[1] = myea[0] >> 8;
   3512 	enaddr[2] = myea[1] & 0xff;
   3513 	enaddr[3] = myea[1] >> 8;
   3514 	enaddr[4] = myea[2] & 0xff;
   3515 	enaddr[5] = myea[2] >> 8;
   3516 
   3517 	/*
   3518 	 * Toggle the LSB of the MAC address on the second port
   3519 	 * of some dual port cards.
   3520 	 */
   3521 	if (do_invert != 0)
   3522 		enaddr[5] ^= 1;
   3523 
   3524 	return 0;
   3525 
   3526  bad:
   3527 	return -1;
   3528 }
   3529 
   3530 /*
   3531  * wm_set_ral:
   3532  *
 *	Set an entry in the receive address list.
   3534  */
   3535 static void
   3536 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3537 {
   3538 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3539 	uint32_t wlock_mac;
   3540 	int rv;
   3541 
   3542 	if (enaddr != NULL) {
   3543 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3544 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3545 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3546 		ral_hi |= RAL_AV;
   3547 	} else {
   3548 		ral_lo = 0;
   3549 		ral_hi = 0;
   3550 	}
   3551 
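	/*
	 * For example (illustrative), 00:11:22:33:44:55 packs to
	 * ral_lo == 0x33221100 and ral_hi == 0x5544 | RAL_AV; the
	 * address is stored byte 0 first across the RAL/RAH pair.
	 */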
   3552 	switch (sc->sc_type) {
   3553 	case WM_T_82542_2_0:
   3554 	case WM_T_82542_2_1:
   3555 	case WM_T_82543:
   3556 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3557 		CSR_WRITE_FLUSH(sc);
   3558 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3559 		CSR_WRITE_FLUSH(sc);
   3560 		break;
   3561 	case WM_T_PCH2:
   3562 	case WM_T_PCH_LPT:
   3563 	case WM_T_PCH_SPT:
   3564 	case WM_T_PCH_CNP:
   3565 		if (idx == 0) {
   3566 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3567 			CSR_WRITE_FLUSH(sc);
   3568 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3569 			CSR_WRITE_FLUSH(sc);
   3570 			return;
   3571 		}
   3572 		if (sc->sc_type != WM_T_PCH2) {
   3573 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3574 			    FWSM_WLOCK_MAC);
   3575 			addrl = WMREG_SHRAL(idx - 1);
   3576 			addrh = WMREG_SHRAH(idx - 1);
   3577 		} else {
   3578 			wlock_mac = 0;
   3579 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3580 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3581 		}
   3582 
   3583 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3584 			rv = wm_get_swflag_ich8lan(sc);
   3585 			if (rv != 0)
   3586 				return;
   3587 			CSR_WRITE(sc, addrl, ral_lo);
   3588 			CSR_WRITE_FLUSH(sc);
   3589 			CSR_WRITE(sc, addrh, ral_hi);
   3590 			CSR_WRITE_FLUSH(sc);
   3591 			wm_put_swflag_ich8lan(sc);
   3592 		}
   3593 
   3594 		break;
   3595 	default:
   3596 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3597 		CSR_WRITE_FLUSH(sc);
   3598 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3599 		CSR_WRITE_FLUSH(sc);
   3600 		break;
   3601 	}
   3602 }
   3603 
   3604 /*
   3605  * wm_mchash:
   3606  *
   3607  *	Compute the hash of the multicast address for the 4096-bit
   3608  *	multicast filter.
   3609  */
   3610 static uint32_t
   3611 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3612 {
   3613 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3614 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3615 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3616 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3617 	uint32_t hash;
   3618 
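	/*
	 * The hash is computed from the last two octets of the address.
	 * For example (illustrative), with mchash_type 0 the multicast
	 * address 01:00:5e:00:00:01 has enaddr[4] = 0x00 and
	 * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010;
	 * the caller then uses hash >> 5 as the MTA word index and
	 * hash & 0x1f as the bit within that word.
	 */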
   3619 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3620 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3621 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   3623 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3624 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3625 		return (hash & 0x3ff);
   3626 	}
   3627 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3628 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3629 
   3630 	return (hash & 0xfff);
   3631 }
   3632 
   3633 /*
 * wm_rar_count:
 *
 *	Return the number of usable receive address (RAL/RAR) entries
 *	for this chip type.
   3636  */
   3637 static int
   3638 wm_rar_count(struct wm_softc *sc)
   3639 {
   3640 	int size;
   3641 
   3642 	switch (sc->sc_type) {
   3643 	case WM_T_ICH8:
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3645 		break;
   3646 	case WM_T_ICH9:
   3647 	case WM_T_ICH10:
   3648 	case WM_T_PCH:
   3649 		size = WM_RAL_TABSIZE_ICH8;
   3650 		break;
   3651 	case WM_T_PCH2:
   3652 		size = WM_RAL_TABSIZE_PCH2;
   3653 		break;
   3654 	case WM_T_PCH_LPT:
   3655 	case WM_T_PCH_SPT:
   3656 	case WM_T_PCH_CNP:
   3657 		size = WM_RAL_TABSIZE_PCH_LPT;
   3658 		break;
   3659 	case WM_T_82575:
   3660 	case WM_T_I210:
   3661 	case WM_T_I211:
   3662 		size = WM_RAL_TABSIZE_82575;
   3663 		break;
   3664 	case WM_T_82576:
   3665 	case WM_T_82580:
   3666 		size = WM_RAL_TABSIZE_82576;
   3667 		break;
   3668 	case WM_T_I350:
   3669 	case WM_T_I354:
   3670 		size = WM_RAL_TABSIZE_I350;
   3671 		break;
   3672 	default:
   3673 		size = WM_RAL_TABSIZE;
   3674 	}
   3675 
   3676 	return size;
   3677 }
   3678 
   3679 /*
   3680  * wm_set_filter:
   3681  *
   3682  *	Set up the receive filter.
   3683  */
   3684 static void
   3685 wm_set_filter(struct wm_softc *sc)
   3686 {
   3687 	struct ethercom *ec = &sc->sc_ethercom;
   3688 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3689 	struct ether_multi *enm;
   3690 	struct ether_multistep step;
   3691 	bus_addr_t mta_reg;
   3692 	uint32_t hash, reg, bit;
   3693 	int i, size, ralmax;
   3694 
   3695 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3696 		device_xname(sc->sc_dev), __func__));
   3697 
   3698 	if (sc->sc_type >= WM_T_82544)
   3699 		mta_reg = WMREG_CORDOVA_MTA;
   3700 	else
   3701 		mta_reg = WMREG_MTA;
   3702 
   3703 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
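	/*
	 * RCTL_BAM accepts broadcast frames, RCTL_UPE enables unicast
	 * promiscuous mode and RCTL_MPE enables multicast promiscuous
	 * mode.
	 */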
   3704 
   3705 	if (ifp->if_flags & IFF_BROADCAST)
   3706 		sc->sc_rctl |= RCTL_BAM;
   3707 	if (ifp->if_flags & IFF_PROMISC) {
   3708 		sc->sc_rctl |= RCTL_UPE;
   3709 		ETHER_LOCK(ec);
   3710 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3711 		ETHER_UNLOCK(ec);
   3712 		goto allmulti;
   3713 	}
   3714 
   3715 	/*
   3716 	 * Set the station address in the first RAL slot, and
   3717 	 * clear the remaining slots.
   3718 	 */
   3719 	size = wm_rar_count(sc);
   3720 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3721 
   3722 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3723 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3724 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3725 		switch (i) {
   3726 		case 0:
   3727 			/* We can use all entries */
   3728 			ralmax = size;
   3729 			break;
   3730 		case 1:
   3731 			/* Only RAR[0] */
   3732 			ralmax = 1;
   3733 			break;
   3734 		default:
   3735 			/* Available SHRA + RAR[0] */
   3736 			ralmax = i + 1;
   3737 		}
   3738 	} else
   3739 		ralmax = size;
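	/*
	 * Clear the remaining slots.  Entries at or above ralmax are
	 * skipped; on PCH_LPT and newer they may be locked by firmware
	 * (FWSM_WLOCK_MAC).
	 */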
   3740 	for (i = 1; i < size; i++) {
   3741 		if (i < ralmax)
   3742 			wm_set_ral(sc, NULL, i);
   3743 	}
   3744 
   3745 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3746 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3747 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3748 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3749 		size = WM_ICH8_MC_TABSIZE;
   3750 	else
   3751 		size = WM_MC_TABSIZE;
   3752 	/* Clear out the multicast table. */
   3753 	for (i = 0; i < size; i++) {
   3754 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3755 		CSR_WRITE_FLUSH(sc);
   3756 	}
   3757 
   3758 	ETHER_LOCK(ec);
   3759 	ETHER_FIRST_MULTI(step, ec, enm);
   3760 	while (enm != NULL) {
   3761 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3762 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3763 			ETHER_UNLOCK(ec);
   3764 			/*
   3765 			 * We must listen to a range of multicast addresses.
   3766 			 * For now, just accept all multicasts, rather than
   3767 			 * trying to set only those filter bits needed to match
   3768 			 * the range.  (At this time, the only use of address
   3769 			 * ranges is for IP multicast routing, for which the
   3770 			 * range is big enough to require all bits set.)
   3771 			 */
   3772 			goto allmulti;
   3773 		}
   3774 
   3775 		hash = wm_mchash(sc, enm->enm_addrlo);
   3776 
   3777 		reg = (hash >> 5);
   3778 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3779 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3780 		    || (sc->sc_type == WM_T_PCH2)
   3781 		    || (sc->sc_type == WM_T_PCH_LPT)
   3782 		    || (sc->sc_type == WM_T_PCH_SPT)
   3783 		    || (sc->sc_type == WM_T_PCH_CNP))
   3784 			reg &= 0x1f;
   3785 		else
   3786 			reg &= 0x7f;
   3787 		bit = hash & 0x1f;
   3788 
   3789 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3790 		hash |= 1U << bit;
   3791 
   3792 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3793 			/*
			 * 82544 Errata 9: Certain registers cannot be written
   3795 			 * with particular alignments in PCI-X bus operation
   3796 			 * (FCAH, MTA and VFTA).
   3797 			 */
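			/*
			 * Save the neighbouring even-indexed word, write
			 * the target word, then rewrite the saved word so
			 * both end up holding the intended values.
			 */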
   3798 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3799 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3800 			CSR_WRITE_FLUSH(sc);
   3801 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3802 			CSR_WRITE_FLUSH(sc);
   3803 		} else {
   3804 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3805 			CSR_WRITE_FLUSH(sc);
   3806 		}
   3807 
   3808 		ETHER_NEXT_MULTI(step, enm);
   3809 	}
   3810 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3811 	ETHER_UNLOCK(ec);
   3812 
   3813 	goto setit;
   3814 
   3815  allmulti:
   3816 	sc->sc_rctl |= RCTL_MPE;
   3817 
   3818  setit:
   3819 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3820 }
   3821 
   3822 /* Reset and init related */
   3823 
   3824 static void
   3825 wm_set_vlan(struct wm_softc *sc)
   3826 {
   3827 
   3828 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3829 		device_xname(sc->sc_dev), __func__));
   3830 
   3831 	/* Deal with VLAN enables. */
   3832 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3833 		sc->sc_ctrl |= CTRL_VME;
   3834 	else
   3835 		sc->sc_ctrl &= ~CTRL_VME;
   3836 
   3837 	/* Write the control registers. */
   3838 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3839 }
   3840 
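/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is still at its default of 0, set
 *	it: to 10ms via GCR on devices without capability version 2,
 *	otherwise to 16ms via the PCIe Device Control 2 register.
 *	Completion timeout resend is disabled in either case.
 */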
   3841 static void
   3842 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3843 {
   3844 	uint32_t gcr;
   3845 	pcireg_t ctrl2;
   3846 
   3847 	gcr = CSR_READ(sc, WMREG_GCR);
   3848 
   3849 	/* Only take action if timeout value is defaulted to 0 */
   3850 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3851 		goto out;
   3852 
   3853 	if ((gcr & GCR_CAP_VER2) == 0) {
   3854 		gcr |= GCR_CMPL_TMOUT_10MS;
   3855 		goto out;
   3856 	}
   3857 
   3858 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3859 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3860 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3861 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3862 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3863 
   3864 out:
   3865 	/* Disable completion timeout resend */
   3866 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3867 
   3868 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3869 }
   3870 
   3871 void
   3872 wm_get_auto_rd_done(struct wm_softc *sc)
   3873 {
   3874 	int i;
   3875 
	/* Wait for eeprom to reload */
   3877 	switch (sc->sc_type) {
   3878 	case WM_T_82571:
   3879 	case WM_T_82572:
   3880 	case WM_T_82573:
   3881 	case WM_T_82574:
   3882 	case WM_T_82583:
   3883 	case WM_T_82575:
   3884 	case WM_T_82576:
   3885 	case WM_T_82580:
   3886 	case WM_T_I350:
   3887 	case WM_T_I354:
   3888 	case WM_T_I210:
   3889 	case WM_T_I211:
   3890 	case WM_T_80003:
   3891 	case WM_T_ICH8:
   3892 	case WM_T_ICH9:
   3893 		for (i = 0; i < 10; i++) {
   3894 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3895 				break;
   3896 			delay(1000);
   3897 		}
   3898 		if (i == 10) {
   3899 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3900 			    "complete\n", device_xname(sc->sc_dev));
   3901 		}
   3902 		break;
   3903 	default:
   3904 		break;
   3905 	}
   3906 }
   3907 
   3908 void
   3909 wm_lan_init_done(struct wm_softc *sc)
   3910 {
   3911 	uint32_t reg = 0;
   3912 	int i;
   3913 
   3914 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3915 		device_xname(sc->sc_dev), __func__));
   3916 
   3917 	/* Wait for eeprom to reload */
   3918 	switch (sc->sc_type) {
   3919 	case WM_T_ICH10:
   3920 	case WM_T_PCH:
   3921 	case WM_T_PCH2:
   3922 	case WM_T_PCH_LPT:
   3923 	case WM_T_PCH_SPT:
   3924 	case WM_T_PCH_CNP:
   3925 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3926 			reg = CSR_READ(sc, WMREG_STATUS);
   3927 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3928 				break;
   3929 			delay(100);
   3930 		}
   3931 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3932 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3933 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3934 		}
   3935 		break;
   3936 	default:
   3937 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3938 		    __func__);
   3939 		break;
   3940 	}
   3941 
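	/* Clear the done bit, presumably so a later reload can be detected anew. */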
   3942 	reg &= ~STATUS_LAN_INIT_DONE;
   3943 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3944 }
   3945 
   3946 void
   3947 wm_get_cfg_done(struct wm_softc *sc)
   3948 {
   3949 	int mask;
   3950 	uint32_t reg;
   3951 	int i;
   3952 
   3953 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3954 		device_xname(sc->sc_dev), __func__));
   3955 
   3956 	/* Wait for eeprom to reload */
   3957 	switch (sc->sc_type) {
   3958 	case WM_T_82542_2_0:
   3959 	case WM_T_82542_2_1:
   3960 		/* null */
   3961 		break;
   3962 	case WM_T_82543:
   3963 	case WM_T_82544:
   3964 	case WM_T_82540:
   3965 	case WM_T_82545:
   3966 	case WM_T_82545_3:
   3967 	case WM_T_82546:
   3968 	case WM_T_82546_3:
   3969 	case WM_T_82541:
   3970 	case WM_T_82541_2:
   3971 	case WM_T_82547:
   3972 	case WM_T_82547_2:
   3973 	case WM_T_82573:
   3974 	case WM_T_82574:
   3975 	case WM_T_82583:
   3976 		/* generic */
   3977 		delay(10*1000);
   3978 		break;
   3979 	case WM_T_80003:
   3980 	case WM_T_82571:
   3981 	case WM_T_82572:
   3982 	case WM_T_82575:
   3983 	case WM_T_82576:
   3984 	case WM_T_82580:
   3985 	case WM_T_I350:
   3986 	case WM_T_I354:
   3987 	case WM_T_I210:
   3988 	case WM_T_I211:
   3989 		if (sc->sc_type == WM_T_82571) {
			/* Only the 82571 uses port 0's CFGDONE bit for all functions */
   3991 			mask = EEMNGCTL_CFGDONE_0;
   3992 		} else
   3993 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3994 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3995 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3996 				break;
   3997 			delay(1000);
   3998 		}
   3999 		if (i >= WM_PHY_CFG_TIMEOUT)
   4000 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4001 				device_xname(sc->sc_dev), __func__));
   4002 		break;
   4003 	case WM_T_ICH8:
   4004 	case WM_T_ICH9:
   4005 	case WM_T_ICH10:
   4006 	case WM_T_PCH:
   4007 	case WM_T_PCH2:
   4008 	case WM_T_PCH_LPT:
   4009 	case WM_T_PCH_SPT:
   4010 	case WM_T_PCH_CNP:
   4011 		delay(10*1000);
   4012 		if (sc->sc_type >= WM_T_ICH10)
   4013 			wm_lan_init_done(sc);
   4014 		else
   4015 			wm_get_auto_rd_done(sc);
   4016 
   4017 		/* Clear PHY Reset Asserted bit */
   4018 		reg = CSR_READ(sc, WMREG_STATUS);
   4019 		if ((reg & STATUS_PHYRA) != 0)
   4020 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4021 		break;
   4022 	default:
   4023 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4024 		    __func__);
   4025 		break;
   4026 	}
   4027 }
   4028 
   4029 int
   4030 wm_phy_post_reset(struct wm_softc *sc)
   4031 {
   4032 	device_t dev = sc->sc_dev;
   4033 	uint16_t reg;
   4034 	int rv = 0;
   4035 
   4036 	/* This function is only for ICH8 and newer. */
   4037 	if (sc->sc_type < WM_T_ICH8)
   4038 		return 0;
   4039 
   4040 	if (wm_phy_resetisblocked(sc)) {
   4041 		/* XXX */
   4042 		device_printf(dev, "PHY is blocked\n");
   4043 		return -1;
   4044 	}
   4045 
   4046 	/* Allow time for h/w to get to quiescent state after reset */
   4047 	delay(10*1000);
   4048 
   4049 	/* Perform any necessary post-reset workarounds */
   4050 	if (sc->sc_type == WM_T_PCH)
   4051 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4052 	else if (sc->sc_type == WM_T_PCH2)
   4053 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4054 	if (rv != 0)
   4055 		return rv;
   4056 
   4057 	/* Clear the host wakeup bit after lcd reset */
   4058 	if (sc->sc_type >= WM_T_PCH) {
   4059 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4060 		reg &= ~BM_WUC_HOST_WU_BIT;
   4061 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4062 	}
   4063 
   4064 	/* Configure the LCD with the extended configuration region in NVM */
   4065 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4066 		return rv;
   4067 
   4068 	/* Configure the LCD with the OEM bits in NVM */
   4069 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4070 
   4071 	if (sc->sc_type == WM_T_PCH2) {
   4072 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4073 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4074 			delay(10 * 1000);
   4075 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4076 		}
   4077 		/* Set EEE LPI Update Timer to 200usec */
   4078 		rv = sc->phy.acquire(sc);
   4079 		if (rv)
   4080 			return rv;
   4081 		rv = wm_write_emi_reg_locked(dev,
   4082 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4083 		sc->phy.release(sc);
   4084 	}
   4085 
   4086 	return rv;
   4087 }
   4088 
   4089 /* Only for PCH and newer */
   4090 static int
   4091 wm_write_smbus_addr(struct wm_softc *sc)
   4092 {
   4093 	uint32_t strap, freq;
   4094 	uint16_t phy_data;
   4095 	int rv;
   4096 
   4097 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4098 		device_xname(sc->sc_dev), __func__));
   4099 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4100 
   4101 	strap = CSR_READ(sc, WMREG_STRAP);
   4102 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4103 
   4104 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4105 	if (rv != 0)
   4106 		return -1;
   4107 
   4108 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4109 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4110 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4111 
   4112 	if (sc->sc_phytype == WMPHY_I217) {
   4113 		/* Restore SMBus frequency */
		if (freq--) {
   4115 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4116 			    | HV_SMB_ADDR_FREQ_HIGH);
   4117 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4118 			    HV_SMB_ADDR_FREQ_LOW);
   4119 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4120 			    HV_SMB_ADDR_FREQ_HIGH);
   4121 		} else
   4122 			DPRINTF(WM_DEBUG_INIT,
   4123 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4124 				device_xname(sc->sc_dev), __func__));
   4125 	}
   4126 
   4127 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4128 	    phy_data);
   4129 }
   4130 
   4131 static int
   4132 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4133 {
   4134 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4135 	uint16_t phy_page = 0;
   4136 	int rv = 0;
   4137 
   4138 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4139 		device_xname(sc->sc_dev), __func__));
   4140 
   4141 	switch (sc->sc_type) {
   4142 	case WM_T_ICH8:
		if (sc->sc_phytype != WMPHY_IGP_3)
   4145 			return 0;
   4146 
   4147 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4148 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4149 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4150 			break;
   4151 		}
   4152 		/* FALLTHROUGH */
   4153 	case WM_T_PCH:
   4154 	case WM_T_PCH2:
   4155 	case WM_T_PCH_LPT:
   4156 	case WM_T_PCH_SPT:
   4157 	case WM_T_PCH_CNP:
   4158 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4159 		break;
   4160 	default:
   4161 		return 0;
   4162 	}
   4163 
   4164 	if ((rv = sc->phy.acquire(sc)) != 0)
   4165 		return rv;
   4166 
   4167 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4168 	if ((reg & sw_cfg_mask) == 0)
   4169 		goto release;
   4170 
   4171 	/*
   4172 	 * Make sure HW does not configure LCD from PHY extended configuration
   4173 	 * before SW configuration
   4174 	 */
   4175 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4176 	if ((sc->sc_type < WM_T_PCH2)
   4177 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4178 		goto release;
   4179 
   4180 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4181 		device_xname(sc->sc_dev), __func__));
   4182 	/* word_addr is in DWORD */
   4183 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4184 
   4185 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4186 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4187 	if (cnf_size == 0)
   4188 		goto release;
   4189 
   4190 	if (((sc->sc_type == WM_T_PCH)
   4191 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4192 	    || (sc->sc_type > WM_T_PCH)) {
   4193 		/*
   4194 		 * HW configures the SMBus address and LEDs when the OEM and
   4195 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4196 		 * are cleared, SW will configure them instead.
   4197 		 */
   4198 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4199 			device_xname(sc->sc_dev), __func__));
   4200 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4201 			goto release;
   4202 
   4203 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4204 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4205 		    (uint16_t)reg);
   4206 		if (rv != 0)
   4207 			goto release;
   4208 	}
   4209 
   4210 	/* Configure LCD from extended configuration region. */
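	/*
	 * Each entry is a pair of 16-bit NVM words: the data to write
	 * followed by the PHY register address.  A write to
	 * MII_IGPHY_PAGE_SELECT updates the page applied to subsequent
	 * register addresses.
	 */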
   4211 	for (i = 0; i < cnf_size; i++) {
   4212 		uint16_t reg_data, reg_addr;
   4213 
   4214 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4215 			goto release;
   4216 
		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4218 			goto release;
   4219 
   4220 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4221 			phy_page = reg_data;
   4222 
   4223 		reg_addr &= IGPHY_MAXREGADDR;
   4224 		reg_addr |= phy_page;
   4225 
   4226 		KASSERT(sc->phy.writereg_locked != NULL);
   4227 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4228 		    reg_data);
   4229 	}
   4230 
   4231 release:
   4232 	sc->phy.release(sc);
   4233 	return rv;
   4234 }
   4235 
   4236 /*
   4237  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4238  *  @sc:       pointer to the HW structure
   4239  *  @d0_state: boolean if entering d0 or d3 device state
   4240  *
   4241  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4242  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4243  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4244  */
   4245 int
   4246 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4247 {
   4248 	uint32_t mac_reg;
   4249 	uint16_t oem_reg;
   4250 	int rv;
   4251 
   4252 	if (sc->sc_type < WM_T_PCH)
   4253 		return 0;
   4254 
   4255 	rv = sc->phy.acquire(sc);
   4256 	if (rv != 0)
   4257 		return rv;
   4258 
   4259 	if (sc->sc_type == WM_T_PCH) {
   4260 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4261 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4262 			goto release;
   4263 	}
   4264 
   4265 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4266 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4267 		goto release;
   4268 
   4269 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4270 
   4271 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4272 	if (rv != 0)
   4273 		goto release;
   4274 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4275 
   4276 	if (d0_state) {
   4277 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4278 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4279 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4280 			oem_reg |= HV_OEM_BITS_LPLU;
   4281 	} else {
   4282 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4283 		    != 0)
   4284 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4285 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4286 		    != 0)
   4287 			oem_reg |= HV_OEM_BITS_LPLU;
   4288 	}
   4289 
   4290 	/* Set Restart auto-neg to activate the bits */
   4291 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4292 	    && (wm_phy_resetisblocked(sc) == false))
   4293 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4294 
   4295 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4296 
   4297 release:
   4298 	sc->phy.release(sc);
   4299 
   4300 	return rv;
   4301 }
   4302 
   4303 /* Init hardware bits */
   4304 void
   4305 wm_initialize_hardware_bits(struct wm_softc *sc)
   4306 {
   4307 	uint32_t tarc0, tarc1, reg;
   4308 
   4309 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4310 		device_xname(sc->sc_dev), __func__));
   4311 
   4312 	/* For 82571 variant, 80003 and ICHs */
   4313 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4314 	    || (sc->sc_type >= WM_T_80003)) {
   4315 
   4316 		/* Transmit Descriptor Control 0 */
   4317 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4318 		reg |= TXDCTL_COUNT_DESC;
   4319 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4320 
   4321 		/* Transmit Descriptor Control 1 */
   4322 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4323 		reg |= TXDCTL_COUNT_DESC;
   4324 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4325 
   4326 		/* TARC0 */
   4327 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4328 		switch (sc->sc_type) {
   4329 		case WM_T_82571:
   4330 		case WM_T_82572:
   4331 		case WM_T_82573:
   4332 		case WM_T_82574:
   4333 		case WM_T_82583:
   4334 		case WM_T_80003:
   4335 			/* Clear bits 30..27 */
   4336 			tarc0 &= ~__BITS(30, 27);
   4337 			break;
   4338 		default:
   4339 			break;
   4340 		}
   4341 
   4342 		switch (sc->sc_type) {
   4343 		case WM_T_82571:
   4344 		case WM_T_82572:
   4345 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4346 
   4347 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4348 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4349 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4350 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4352 
   4353 			/* TARC1 bit 28 */
   4354 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4355 				tarc1 &= ~__BIT(28);
   4356 			else
   4357 				tarc1 |= __BIT(28);
   4358 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4359 
   4360 			/*
   4361 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   4363 			 */
   4364 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4365 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4366 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4367 			break;
   4368 		case WM_T_82573:
   4369 		case WM_T_82574:
   4370 		case WM_T_82583:
   4371 			if ((sc->sc_type == WM_T_82574)
   4372 			    || (sc->sc_type == WM_T_82583))
   4373 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4374 
   4375 			/* Extended Device Control */
   4376 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4377 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4378 			reg |= __BIT(22);	/* Set bit 22 */
   4379 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4380 
   4381 			/* Device Control */
   4382 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4383 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4384 
   4385 			/* PCIe Control Register */
   4386 			/*
   4387 			 * 82573 Errata (unknown).
   4388 			 *
   4389 			 * 82574 Errata 25 and 82583 Errata 12
   4390 			 * "Dropped Rx Packets":
			 *   NVM image version 2.1.4 and newer do not have this bug.
   4392 			 */
   4393 			reg = CSR_READ(sc, WMREG_GCR);
   4394 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4395 			CSR_WRITE(sc, WMREG_GCR, reg);
   4396 
   4397 			if ((sc->sc_type == WM_T_82574)
   4398 			    || (sc->sc_type == WM_T_82583)) {
   4399 				/*
   4400 				 * Document says this bit must be set for
   4401 				 * proper operation.
   4402 				 */
   4403 				reg = CSR_READ(sc, WMREG_GCR);
   4404 				reg |= __BIT(22);
   4405 				CSR_WRITE(sc, WMREG_GCR, reg);
   4406 
   4407 				/*
				 * Apply a workaround for the hardware errata
				 * documented in the errata docs. It fixes an
				 * issue where error-prone or unreliable PCIe
				 * completions may occur, particularly with
				 * ASPM enabled. Without the fix, the issue
				 * can cause Tx timeouts.
   4414 				 */
   4415 				reg = CSR_READ(sc, WMREG_GCR2);
   4416 				reg |= __BIT(0);
   4417 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4418 			}
   4419 			break;
   4420 		case WM_T_80003:
   4421 			/* TARC0 */
   4422 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4423 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4425 
   4426 			/* TARC1 bit 28 */
   4427 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4428 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4429 				tarc1 &= ~__BIT(28);
   4430 			else
   4431 				tarc1 |= __BIT(28);
   4432 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4433 			break;
   4434 		case WM_T_ICH8:
   4435 		case WM_T_ICH9:
   4436 		case WM_T_ICH10:
   4437 		case WM_T_PCH:
   4438 		case WM_T_PCH2:
   4439 		case WM_T_PCH_LPT:
   4440 		case WM_T_PCH_SPT:
   4441 		case WM_T_PCH_CNP:
   4442 			/* TARC0 */
   4443 			if (sc->sc_type == WM_T_ICH8) {
   4444 				/* Set TARC0 bits 29 and 28 */
   4445 				tarc0 |= __BITS(29, 28);
   4446 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4447 				tarc0 |= __BIT(29);
   4448 				/*
				 * Drop bit 28. From Linux.
   4450 				 * See I218/I219 spec update
   4451 				 * "5. Buffer Overrun While the I219 is
   4452 				 * Processing DMA Transactions"
   4453 				 */
   4454 				tarc0 &= ~__BIT(28);
   4455 			}
   4456 			/* Set TARC0 bits 23,24,26,27 */
   4457 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4458 
   4459 			/* CTRL_EXT */
   4460 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4461 			reg |= __BIT(22);	/* Set bit 22 */
   4462 			/*
   4463 			 * Enable PHY low-power state when MAC is at D3
   4464 			 * w/o WoL
   4465 			 */
   4466 			if (sc->sc_type >= WM_T_PCH)
   4467 				reg |= CTRL_EXT_PHYPDEN;
   4468 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4469 
   4470 			/* TARC1 */
   4471 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4472 			/* bit 28 */
   4473 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4474 				tarc1 &= ~__BIT(28);
   4475 			else
   4476 				tarc1 |= __BIT(28);
   4477 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4478 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4479 
   4480 			/* Device Status */
   4481 			if (sc->sc_type == WM_T_ICH8) {
   4482 				reg = CSR_READ(sc, WMREG_STATUS);
   4483 				reg &= ~__BIT(31);
   4484 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4485 
   4486 			}
   4487 
   4488 			/* IOSFPC */
   4489 			if (sc->sc_type == WM_T_PCH_SPT) {
   4490 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4491 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4492 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4493 			}
   4494 			/*
    4495 			 * To work around a descriptor data corruption issue
    4496 			 * seen with NFS v2 UDP traffic, just disable the NFS
    4497 			 * filtering capability.
   4498 			 */
   4499 			reg = CSR_READ(sc, WMREG_RFCTL);
   4500 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4501 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4502 			break;
   4503 		default:
   4504 			break;
   4505 		}
   4506 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4507 
   4508 		switch (sc->sc_type) {
   4509 		/*
   4510 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4511 		 * Avoid RSS Hash Value bug.
   4512 		 */
   4513 		case WM_T_82571:
   4514 		case WM_T_82572:
   4515 		case WM_T_82573:
   4516 		case WM_T_80003:
   4517 		case WM_T_ICH8:
   4518 			reg = CSR_READ(sc, WMREG_RFCTL);
    4519 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |
         			    WMREG_RFCTL_IPV6EXDIS;
   4520 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4521 			break;
   4522 		case WM_T_82574:
    4523 			/* Use the extended Rx descriptor. */
   4524 			reg = CSR_READ(sc, WMREG_RFCTL);
   4525 			reg |= WMREG_RFCTL_EXSTEN;
   4526 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4527 			break;
   4528 		default:
   4529 			break;
   4530 		}
   4531 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4532 		/*
   4533 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4534 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4535 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4536 		 * Correctly by the Device"
   4537 		 *
   4538 		 * I354(C2000) Errata AVR53:
   4539 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4540 		 * Hang"
   4541 		 */
   4542 		reg = CSR_READ(sc, WMREG_RFCTL);
   4543 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4544 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4545 	}
   4546 }
   4547 
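         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Look up the 82580 packet buffer size value in
          *	wm_82580_rxpbs_table; out-of-range values map to 0.
          */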
   4548 static uint32_t
   4549 wm_rxpbs_adjust_82580(uint32_t val)
   4550 {
   4551 	uint32_t rv = 0;
   4552 
   4553 	if (val < __arraycount(wm_82580_rxpbs_table))
   4554 		rv = wm_82580_rxpbs_table[val];
   4555 
   4556 	return rv;
   4557 }
   4558 
   4559 /*
   4560  * wm_reset_phy:
   4561  *
   4562  *	generic PHY reset function.
   4563  *	Same as e1000_phy_hw_reset_generic()
   4564  */
   4565 static int
   4566 wm_reset_phy(struct wm_softc *sc)
   4567 {
   4568 	uint32_t reg;
   4569 
   4570 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4571 		device_xname(sc->sc_dev), __func__));
   4572 	if (wm_phy_resetisblocked(sc))
   4573 		return -1;
   4574 
   4575 	sc->phy.acquire(sc);
   4576 
   4577 	reg = CSR_READ(sc, WMREG_CTRL);
   4578 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4579 	CSR_WRITE_FLUSH(sc);
   4580 
   4581 	delay(sc->phy.reset_delay_us);
   4582 
   4583 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4584 	CSR_WRITE_FLUSH(sc);
   4585 
   4586 	delay(150);
   4587 
   4588 	sc->phy.release(sc);
   4589 
   4590 	wm_get_cfg_done(sc);
   4591 	wm_phy_post_reset(sc);
   4592 
   4593 	return 0;
   4594 }
   4595 
   4596 /*
    4597  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4598  * so it is enough to check sc->sc_queue[0].
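          *
          * Flush the Tx and Rx descriptor rings when PCI configuration space
          * (WM_PCI_DESCRING_STATUS) reports a pending flush request; called
          * from wm_init_locked() before the chip reset as a PCH_SPT hardware
          * workaround.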
   4599  */
   4600 static void
   4601 wm_flush_desc_rings(struct wm_softc *sc)
   4602 {
   4603 	pcireg_t preg;
   4604 	uint32_t reg;
   4605 	struct wm_txqueue *txq;
   4606 	wiseman_txdesc_t *txd;
   4607 	int nexttx;
   4608 	uint32_t rctl;
   4609 
   4610 	/* First, disable MULR fix in FEXTNVM11 */
   4611 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4612 	reg |= FEXTNVM11_DIS_MULRFIX;
   4613 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4614 
   4615 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4616 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4617 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4618 		return;
   4619 
   4620 	/* TX */
   4621 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4622 	    preg, reg);
   4623 	reg = CSR_READ(sc, WMREG_TCTL);
   4624 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4625 
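         	/*
         	 * Queue a single dummy descriptor (IFCS, 512 bytes) on
         	 * queue 0 so the hardware has something to consume and can
         	 * drain the stuck Tx ring.
         	 */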
   4626 	txq = &sc->sc_queue[0].wmq_txq;
   4627 	nexttx = txq->txq_next;
   4628 	txd = &txq->txq_descs[nexttx];
   4629 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4630 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4631 	txd->wtx_fields.wtxu_status = 0;
   4632 	txd->wtx_fields.wtxu_options = 0;
   4633 	txd->wtx_fields.wtxu_vlan = 0;
   4634 
   4635 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4636 	    BUS_SPACE_BARRIER_WRITE);
   4637 
   4638 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4639 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4640 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4641 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4642 	delay(250);
   4643 
   4644 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4645 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4646 		return;
   4647 
   4648 	/* RX */
   4649 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4650 	rctl = CSR_READ(sc, WMREG_RCTL);
   4651 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4652 	CSR_WRITE_FLUSH(sc);
   4653 	delay(150);
   4654 
   4655 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4656 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4657 	reg &= 0xffffc000;
   4658 	/*
   4659 	 * Update thresholds: prefetch threshold to 31, host threshold
   4660 	 * to 1 and make sure the granularity is "descriptors" and not
   4661 	 * "cache lines"
   4662 	 */
   4663 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4664 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4665 
   4666 	/* Momentarily enable the RX ring for the changes to take effect */
   4667 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4668 	CSR_WRITE_FLUSH(sc);
   4669 	delay(150);
   4670 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4671 }
   4672 
   4673 /*
   4674  * wm_reset:
   4675  *
    4676  *	Reset the chip to a known state.
   4677  */
   4678 static void
   4679 wm_reset(struct wm_softc *sc)
   4680 {
   4681 	int phy_reset = 0;
   4682 	int i, error = 0;
   4683 	uint32_t reg;
   4684 	uint16_t kmreg;
   4685 	int rv;
   4686 
   4687 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4688 		device_xname(sc->sc_dev), __func__));
   4689 	KASSERT(sc->sc_type != 0);
   4690 
   4691 	/*
   4692 	 * Allocate on-chip memory according to the MTU size.
   4693 	 * The Packet Buffer Allocation register must be written
   4694 	 * before the chip is reset.
   4695 	 */
   4696 	switch (sc->sc_type) {
   4697 	case WM_T_82547:
   4698 	case WM_T_82547_2:
   4699 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4700 		    PBA_22K : PBA_30K;
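         		/*
         		 * The 40 KB packet buffer is split between Rx (sc_pba)
         		 * and the Tx FIFO (the remainder); record the Tx FIFO
         		 * geometry for the Tx FIFO stall workaround.
         		 */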
   4701 		for (i = 0; i < sc->sc_nqueues; i++) {
   4702 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4703 			txq->txq_fifo_head = 0;
   4704 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4705 			txq->txq_fifo_size =
   4706 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4707 			txq->txq_fifo_stall = 0;
   4708 		}
   4709 		break;
   4710 	case WM_T_82571:
   4711 	case WM_T_82572:
   4712 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4713 	case WM_T_80003:
   4714 		sc->sc_pba = PBA_32K;
   4715 		break;
   4716 	case WM_T_82573:
   4717 		sc->sc_pba = PBA_12K;
   4718 		break;
   4719 	case WM_T_82574:
   4720 	case WM_T_82583:
   4721 		sc->sc_pba = PBA_20K;
   4722 		break;
   4723 	case WM_T_82576:
   4724 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4725 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4726 		break;
   4727 	case WM_T_82580:
   4728 	case WM_T_I350:
   4729 	case WM_T_I354:
   4730 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4731 		break;
   4732 	case WM_T_I210:
   4733 	case WM_T_I211:
   4734 		sc->sc_pba = PBA_34K;
   4735 		break;
   4736 	case WM_T_ICH8:
   4737 		/* Workaround for a bit corruption issue in FIFO memory */
   4738 		sc->sc_pba = PBA_8K;
   4739 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4740 		break;
   4741 	case WM_T_ICH9:
   4742 	case WM_T_ICH10:
   4743 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4744 		    PBA_14K : PBA_10K;
   4745 		break;
   4746 	case WM_T_PCH:
   4747 	case WM_T_PCH2:	/* XXX 14K? */
   4748 	case WM_T_PCH_LPT:
   4749 	case WM_T_PCH_SPT:
   4750 	case WM_T_PCH_CNP:
   4751 		sc->sc_pba = PBA_26K;
   4752 		break;
   4753 	default:
   4754 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4755 		    PBA_40K : PBA_48K;
   4756 		break;
   4757 	}
   4758 	/*
   4759 	 * Only old or non-multiqueue devices have the PBA register
   4760 	 * XXX Need special handling for 82575.
   4761 	 */
   4762 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4763 	    || (sc->sc_type == WM_T_82575))
   4764 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4765 
    4766 	/* Disable GIO master access to keep the PCIe bus from sticking */
   4767 	if (sc->sc_flags & WM_F_PCIE) {
   4768 		int timeout = 800;
   4769 
   4770 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4771 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4772 
   4773 		while (timeout--) {
   4774 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4775 			    == 0)
   4776 				break;
   4777 			delay(100);
   4778 		}
   4779 		if (timeout == 0)
   4780 			device_printf(sc->sc_dev,
   4781 			    "failed to disable busmastering\n");
   4782 	}
   4783 
   4784 	/* Set the completion timeout for interface */
   4785 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4786 	    || (sc->sc_type == WM_T_82580)
   4787 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4788 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4789 		wm_set_pcie_completion_timeout(sc);
   4790 
   4791 	/* Clear interrupt */
   4792 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4793 	if (wm_is_using_msix(sc)) {
   4794 		if (sc->sc_type != WM_T_82574) {
   4795 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4796 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4797 		} else
   4798 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4799 	}
   4800 
   4801 	/* Stop the transmit and receive processes. */
   4802 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4803 	sc->sc_rctl &= ~RCTL_EN;
   4804 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4805 	CSR_WRITE_FLUSH(sc);
   4806 
   4807 	/* XXX set_tbi_sbp_82543() */
   4808 
   4809 	delay(10*1000);
   4810 
   4811 	/* Must acquire the MDIO ownership before MAC reset */
   4812 	switch (sc->sc_type) {
   4813 	case WM_T_82573:
   4814 	case WM_T_82574:
   4815 	case WM_T_82583:
   4816 		error = wm_get_hw_semaphore_82573(sc);
   4817 		break;
   4818 	default:
   4819 		break;
   4820 	}
   4821 
   4822 	/*
   4823 	 * 82541 Errata 29? & 82547 Errata 28?
   4824 	 * See also the description about PHY_RST bit in CTRL register
   4825 	 * in 8254x_GBe_SDM.pdf.
   4826 	 */
   4827 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4828 		CSR_WRITE(sc, WMREG_CTRL,
   4829 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4830 		CSR_WRITE_FLUSH(sc);
   4831 		delay(5000);
   4832 	}
   4833 
   4834 	switch (sc->sc_type) {
   4835 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4836 	case WM_T_82541:
   4837 	case WM_T_82541_2:
   4838 	case WM_T_82547:
   4839 	case WM_T_82547_2:
   4840 		/*
   4841 		 * On some chipsets, a reset through a memory-mapped write
   4842 		 * cycle can cause the chip to reset before completing the
    4843 	 * write cycle. This causes a major headache that can be avoided
   4844 		 * by issuing the reset via indirect register writes through
   4845 		 * I/O space.
   4846 		 *
   4847 		 * So, if we successfully mapped the I/O BAR at attach time,
   4848 		 * use that. Otherwise, try our luck with a memory-mapped
   4849 		 * reset.
   4850 		 */
   4851 		if (sc->sc_flags & WM_F_IOH_VALID)
   4852 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4853 		else
   4854 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4855 		break;
   4856 	case WM_T_82545_3:
   4857 	case WM_T_82546_3:
   4858 		/* Use the shadow control register on these chips. */
   4859 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4860 		break;
   4861 	case WM_T_80003:
   4862 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4863 		sc->phy.acquire(sc);
   4864 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4865 		sc->phy.release(sc);
   4866 		break;
   4867 	case WM_T_ICH8:
   4868 	case WM_T_ICH9:
   4869 	case WM_T_ICH10:
   4870 	case WM_T_PCH:
   4871 	case WM_T_PCH2:
   4872 	case WM_T_PCH_LPT:
   4873 	case WM_T_PCH_SPT:
   4874 	case WM_T_PCH_CNP:
   4875 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4876 		if (wm_phy_resetisblocked(sc) == false) {
   4877 			/*
   4878 			 * Gate automatic PHY configuration by hardware on
   4879 			 * non-managed 82579
   4880 			 */
   4881 			if ((sc->sc_type == WM_T_PCH2)
   4882 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4883 				== 0))
   4884 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4885 
   4886 			reg |= CTRL_PHY_RESET;
   4887 			phy_reset = 1;
   4888 		} else
   4889 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4890 		sc->phy.acquire(sc);
   4891 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4892 		/* Don't insert a completion barrier during reset */
   4893 		delay(20*1000);
   4894 		mutex_exit(sc->sc_ich_phymtx);
   4895 		break;
   4896 	case WM_T_82580:
   4897 	case WM_T_I350:
   4898 	case WM_T_I354:
   4899 	case WM_T_I210:
   4900 	case WM_T_I211:
   4901 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4902 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4903 			CSR_WRITE_FLUSH(sc);
   4904 		delay(5000);
   4905 		break;
   4906 	case WM_T_82542_2_0:
   4907 	case WM_T_82542_2_1:
   4908 	case WM_T_82543:
   4909 	case WM_T_82540:
   4910 	case WM_T_82545:
   4911 	case WM_T_82546:
   4912 	case WM_T_82571:
   4913 	case WM_T_82572:
   4914 	case WM_T_82573:
   4915 	case WM_T_82574:
   4916 	case WM_T_82575:
   4917 	case WM_T_82576:
   4918 	case WM_T_82583:
   4919 	default:
   4920 		/* Everything else can safely use the documented method. */
   4921 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4922 		break;
   4923 	}
   4924 
   4925 	/* Must release the MDIO ownership after MAC reset */
   4926 	switch (sc->sc_type) {
   4927 	case WM_T_82573:
   4928 	case WM_T_82574:
   4929 	case WM_T_82583:
   4930 		if (error == 0)
   4931 			wm_put_hw_semaphore_82573(sc);
   4932 		break;
   4933 	default:
   4934 		break;
   4935 	}
   4936 
   4937 	/* Set Phy Config Counter to 50msec */
   4938 	if (sc->sc_type == WM_T_PCH2) {
   4939 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4940 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4941 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4942 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4943 	}
   4944 
   4945 	if (phy_reset != 0)
   4946 		wm_get_cfg_done(sc);
   4947 
   4948 	/* Reload EEPROM */
   4949 	switch (sc->sc_type) {
   4950 	case WM_T_82542_2_0:
   4951 	case WM_T_82542_2_1:
   4952 	case WM_T_82543:
   4953 	case WM_T_82544:
   4954 		delay(10);
   4955 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4956 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4957 		CSR_WRITE_FLUSH(sc);
   4958 		delay(2000);
   4959 		break;
   4960 	case WM_T_82540:
   4961 	case WM_T_82545:
   4962 	case WM_T_82545_3:
   4963 	case WM_T_82546:
   4964 	case WM_T_82546_3:
   4965 		delay(5*1000);
   4966 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4967 		break;
   4968 	case WM_T_82541:
   4969 	case WM_T_82541_2:
   4970 	case WM_T_82547:
   4971 	case WM_T_82547_2:
   4972 		delay(20000);
   4973 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4974 		break;
   4975 	case WM_T_82571:
   4976 	case WM_T_82572:
   4977 	case WM_T_82573:
   4978 	case WM_T_82574:
   4979 	case WM_T_82583:
   4980 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4981 			delay(10);
   4982 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4983 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4984 			CSR_WRITE_FLUSH(sc);
   4985 		}
   4986 		/* check EECD_EE_AUTORD */
   4987 		wm_get_auto_rd_done(sc);
   4988 		/*
   4989 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4990 		 * is set.
   4991 		 */
   4992 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4993 		    || (sc->sc_type == WM_T_82583))
   4994 			delay(25*1000);
   4995 		break;
   4996 	case WM_T_82575:
   4997 	case WM_T_82576:
   4998 	case WM_T_82580:
   4999 	case WM_T_I350:
   5000 	case WM_T_I354:
   5001 	case WM_T_I210:
   5002 	case WM_T_I211:
   5003 	case WM_T_80003:
   5004 		/* check EECD_EE_AUTORD */
   5005 		wm_get_auto_rd_done(sc);
   5006 		break;
   5007 	case WM_T_ICH8:
   5008 	case WM_T_ICH9:
   5009 	case WM_T_ICH10:
   5010 	case WM_T_PCH:
   5011 	case WM_T_PCH2:
   5012 	case WM_T_PCH_LPT:
   5013 	case WM_T_PCH_SPT:
   5014 	case WM_T_PCH_CNP:
   5015 		break;
   5016 	default:
   5017 		panic("%s: unknown type\n", __func__);
   5018 	}
   5019 
   5020 	/* Check whether EEPROM is present or not */
   5021 	switch (sc->sc_type) {
   5022 	case WM_T_82575:
   5023 	case WM_T_82576:
   5024 	case WM_T_82580:
   5025 	case WM_T_I350:
   5026 	case WM_T_I354:
   5027 	case WM_T_ICH8:
   5028 	case WM_T_ICH9:
   5029 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5030 			/* Not found */
   5031 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5032 			if (sc->sc_type == WM_T_82575)
   5033 				wm_reset_init_script_82575(sc);
   5034 		}
   5035 		break;
   5036 	default:
   5037 		break;
   5038 	}
   5039 
   5040 	if (phy_reset != 0)
   5041 		wm_phy_post_reset(sc);
   5042 
   5043 	if ((sc->sc_type == WM_T_82580)
   5044 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5045 		/* Clear global device reset status bit */
   5046 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5047 	}
   5048 
   5049 	/* Clear any pending interrupt events. */
   5050 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5051 	reg = CSR_READ(sc, WMREG_ICR);
   5052 	if (wm_is_using_msix(sc)) {
   5053 		if (sc->sc_type != WM_T_82574) {
   5054 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5055 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5056 		} else
   5057 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5058 	}
   5059 
   5060 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5061 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5062 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5063 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5064 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5065 		reg |= KABGTXD_BGSQLBIAS;
   5066 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5067 	}
   5068 
   5069 	/* Reload sc_ctrl */
   5070 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5071 
   5072 	wm_set_eee(sc);
   5073 
   5074 	/*
   5075 	 * For PCH, this write will make sure that any noise will be detected
   5076 	 * as a CRC error and be dropped rather than show up as a bad packet
   5077 	 * to the DMA engine
   5078 	 */
   5079 	if (sc->sc_type == WM_T_PCH)
   5080 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5081 
   5082 	if (sc->sc_type >= WM_T_82544)
   5083 		CSR_WRITE(sc, WMREG_WUC, 0);
   5084 
   5085 	if (sc->sc_type < WM_T_82575)
   5086 		wm_disable_aspm(sc); /* Workaround for some chips */
   5087 
   5088 	wm_reset_mdicnfg_82580(sc);
   5089 
   5090 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5091 		wm_pll_workaround_i210(sc);
   5092 
   5093 	if (sc->sc_type == WM_T_80003) {
   5094 		/* Default to TRUE to enable the MDIC W/A */
   5095 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5096 
   5097 		rv = wm_kmrn_readreg(sc,
   5098 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5099 		if (rv == 0) {
   5100 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5101 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5102 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5103 			else
   5104 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5105 		}
   5106 	}
   5107 }
   5108 
   5109 /*
   5110  * wm_add_rxbuf:
   5111  *
    5112  *	Add a receive buffer to the indicated descriptor.
   5113  */
   5114 static int
   5115 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5116 {
   5117 	struct wm_softc *sc = rxq->rxq_sc;
   5118 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5119 	struct mbuf *m;
   5120 	int error;
   5121 
   5122 	KASSERT(mutex_owned(rxq->rxq_lock));
   5123 
   5124 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5125 	if (m == NULL)
   5126 		return ENOBUFS;
   5127 
   5128 	MCLGET(m, M_DONTWAIT);
   5129 	if ((m->m_flags & M_EXT) == 0) {
   5130 		m_freem(m);
   5131 		return ENOBUFS;
   5132 	}
   5133 
   5134 	if (rxs->rxs_mbuf != NULL)
   5135 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5136 
   5137 	rxs->rxs_mbuf = m;
   5138 
   5139 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5140 	/*
   5141 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5142 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5143 	 */
   5144 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5145 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5146 	if (error) {
   5147 		/* XXX XXX XXX */
   5148 		aprint_error_dev(sc->sc_dev,
   5149 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5150 		panic("wm_add_rxbuf");
   5151 	}
   5152 
   5153 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5154 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5155 
   5156 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5157 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5158 			wm_init_rxdesc(rxq, idx);
   5159 	} else
   5160 		wm_init_rxdesc(rxq, idx);
   5161 
   5162 	return 0;
   5163 }
   5164 
   5165 /*
   5166  * wm_rxdrain:
   5167  *
   5168  *	Drain the receive queue.
   5169  */
   5170 static void
   5171 wm_rxdrain(struct wm_rxqueue *rxq)
   5172 {
   5173 	struct wm_softc *sc = rxq->rxq_sc;
   5174 	struct wm_rxsoft *rxs;
   5175 	int i;
   5176 
   5177 	KASSERT(mutex_owned(rxq->rxq_lock));
   5178 
   5179 	for (i = 0; i < WM_NRXDESC; i++) {
   5180 		rxs = &rxq->rxq_soft[i];
   5181 		if (rxs->rxs_mbuf != NULL) {
   5182 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5183 			m_freem(rxs->rxs_mbuf);
   5184 			rxs->rxs_mbuf = NULL;
   5185 		}
   5186 	}
   5187 }
   5188 
   5189 /*
   5190  * Setup registers for RSS.
   5191  *
   5192  * XXX not yet VMDq support
   5193  */
   5194 static void
   5195 wm_init_rss(struct wm_softc *sc)
   5196 {
   5197 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5198 	int i;
   5199 
   5200 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5201 
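         	/*
         	 * Fill the redirection table round-robin: hash bucket i is
         	 * served by queue (i % sc_nqueues). __SHIFTIN(v, mask) places
         	 * v into the bit field described by mask.
         	 */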
   5202 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5203 		unsigned int qid, reta_ent;
   5204 
   5205 		qid  = i % sc->sc_nqueues;
   5206 		switch (sc->sc_type) {
   5207 		case WM_T_82574:
   5208 			reta_ent = __SHIFTIN(qid,
   5209 			    RETA_ENT_QINDEX_MASK_82574);
   5210 			break;
   5211 		case WM_T_82575:
   5212 			reta_ent = __SHIFTIN(qid,
   5213 			    RETA_ENT_QINDEX1_MASK_82575);
   5214 			break;
   5215 		default:
   5216 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5217 			break;
   5218 		}
   5219 
   5220 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5221 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5222 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5223 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5224 	}
   5225 
   5226 	rss_getkey((uint8_t *)rss_key);
   5227 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5228 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5229 
   5230 	if (sc->sc_type == WM_T_82574)
   5231 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5232 	else
   5233 		mrqc = MRQC_ENABLE_RSS_MQ;
   5234 
   5235 	/*
   5236 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5237 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5238 	 */
   5239 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5240 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5241 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5242 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5243 
   5244 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5245 }
   5246 
   5247 /*
    5248  * Adjust the TX and RX queue numbers which the system actually uses.
    5249  *
    5250  * The numbers are affected by the following parameters:
    5251  *     - The number of hardware queues
   5252  *     - The number of MSI-X vectors (= "nvectors" argument)
   5253  *     - ncpu
   5254  */
   5255 static void
   5256 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5257 {
   5258 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5259 
   5260 	if (nvectors < 2) {
   5261 		sc->sc_nqueues = 1;
   5262 		return;
   5263 	}
   5264 
   5265 	switch (sc->sc_type) {
   5266 	case WM_T_82572:
   5267 		hw_ntxqueues = 2;
   5268 		hw_nrxqueues = 2;
   5269 		break;
   5270 	case WM_T_82574:
   5271 		hw_ntxqueues = 2;
   5272 		hw_nrxqueues = 2;
   5273 		break;
   5274 	case WM_T_82575:
   5275 		hw_ntxqueues = 4;
   5276 		hw_nrxqueues = 4;
   5277 		break;
   5278 	case WM_T_82576:
   5279 		hw_ntxqueues = 16;
   5280 		hw_nrxqueues = 16;
   5281 		break;
   5282 	case WM_T_82580:
   5283 	case WM_T_I350:
   5284 	case WM_T_I354:
   5285 		hw_ntxqueues = 8;
   5286 		hw_nrxqueues = 8;
   5287 		break;
   5288 	case WM_T_I210:
   5289 		hw_ntxqueues = 4;
   5290 		hw_nrxqueues = 4;
   5291 		break;
   5292 	case WM_T_I211:
   5293 		hw_ntxqueues = 2;
   5294 		hw_nrxqueues = 2;
   5295 		break;
   5296 		/*
    5297 		 * As the Ethernet controllers below do not support MSI-X,
    5298 		 * this driver does not use multiqueue on them.
   5299 		 *     - WM_T_80003
   5300 		 *     - WM_T_ICH8
   5301 		 *     - WM_T_ICH9
   5302 		 *     - WM_T_ICH10
   5303 		 *     - WM_T_PCH
   5304 		 *     - WM_T_PCH2
   5305 		 *     - WM_T_PCH_LPT
   5306 		 */
   5307 	default:
   5308 		hw_ntxqueues = 1;
   5309 		hw_nrxqueues = 1;
   5310 		break;
   5311 	}
   5312 
   5313 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5314 
   5315 	/*
    5316 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5317 	 * the number of queues actually used.
   5318 	 */
   5319 	if (nvectors < hw_nqueues + 1)
   5320 		sc->sc_nqueues = nvectors - 1;
   5321 	else
   5322 		sc->sc_nqueues = hw_nqueues;
   5323 
   5324 	/*
    5325 	 * As more queues than CPUs cannot improve scaling, we limit
    5326 	 * the number of queues actually used.
   5327 	 */
   5328 	if (ncpu < sc->sc_nqueues)
   5329 		sc->sc_nqueues = ncpu;
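         	/*
         	 * For example, an 82576 provides 16 hardware queues; with
         	 * nvectors == 5 and ncpu == 8 this works out to
         	 * sc_nqueues = min(16, 5 - 1, 8) = 4.
         	 */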
   5330 }
   5331 
   5332 static inline bool
   5333 wm_is_using_msix(struct wm_softc *sc)
   5334 {
   5335 
   5336 	return (sc->sc_nintrs > 1);
   5337 }
   5338 
   5339 static inline bool
   5340 wm_is_using_multiqueue(struct wm_softc *sc)
   5341 {
   5342 
   5343 	return (sc->sc_nqueues > 1);
   5344 }
   5345 
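         /*
          * Establish the per-queue software interrupt (wm_handle_queue)
          * for queue qidx, tied to hardware interrupt slot intr_idx; on
          * failure, the already-established hardware interrupt is torn down.
          */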
   5346 static int
   5347 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5348 {
   5349 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5350 	wmq->wmq_id = qidx;
   5351 	wmq->wmq_intr_idx = intr_idx;
   5352 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5353 #ifdef WM_MPSAFE
   5354 	    | SOFTINT_MPSAFE
   5355 #endif
   5356 	    , wm_handle_queue, wmq);
   5357 	if (wmq->wmq_si != NULL)
   5358 		return 0;
   5359 
   5360 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5361 	    wmq->wmq_id);
   5362 
   5363 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5364 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5365 	return ENOMEM;
   5366 }
   5367 
   5368 /*
   5369  * Both single interrupt MSI and INTx can use this function.
   5370  */
   5371 static int
   5372 wm_setup_legacy(struct wm_softc *sc)
   5373 {
   5374 	pci_chipset_tag_t pc = sc->sc_pc;
   5375 	const char *intrstr = NULL;
   5376 	char intrbuf[PCI_INTRSTR_LEN];
   5377 	int error;
   5378 
   5379 	error = wm_alloc_txrx_queues(sc);
   5380 	if (error) {
   5381 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5382 		    error);
   5383 		return ENOMEM;
   5384 	}
   5385 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5386 	    sizeof(intrbuf));
   5387 #ifdef WM_MPSAFE
   5388 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5389 #endif
   5390 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5391 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5392 	if (sc->sc_ihs[0] == NULL) {
   5393 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5394 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5395 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5396 		return ENOMEM;
   5397 	}
   5398 
   5399 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5400 	sc->sc_nintrs = 1;
   5401 
   5402 	return wm_softint_establish(sc, 0, 0);
   5403 }
   5404 
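         /*
          * Set up MSI-X interrupts: one Tx/Rx vector per queue plus one
          * link vector; queue vectors are bound round-robin to CPUs
          * starting at sc_affinity_offset.
          */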
   5405 static int
   5406 wm_setup_msix(struct wm_softc *sc)
   5407 {
   5408 	void *vih;
   5409 	kcpuset_t *affinity;
   5410 	int qidx, error, intr_idx, txrx_established;
   5411 	pci_chipset_tag_t pc = sc->sc_pc;
   5412 	const char *intrstr = NULL;
   5413 	char intrbuf[PCI_INTRSTR_LEN];
   5414 	char intr_xname[INTRDEVNAMEBUF];
   5415 
   5416 	if (sc->sc_nqueues < ncpu) {
   5417 		/*
    5418 		 * To avoid other devices' interrupts, the Tx/Rx interrupt
    5419 		 * affinity starts from CPU#1.
   5420 		 */
   5421 		sc->sc_affinity_offset = 1;
   5422 	} else {
   5423 		/*
    5424 		 * In this case, this device uses all CPUs. So we align the
    5425 		 * affinity cpu_index with the MSI-X vector for readability.
   5426 		 */
   5427 		sc->sc_affinity_offset = 0;
   5428 	}
   5429 
   5430 	error = wm_alloc_txrx_queues(sc);
   5431 	if (error) {
   5432 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5433 		    error);
   5434 		return ENOMEM;
   5435 	}
   5436 
   5437 	kcpuset_create(&affinity, false);
   5438 	intr_idx = 0;
   5439 
   5440 	/*
   5441 	 * TX and RX
   5442 	 */
   5443 	txrx_established = 0;
   5444 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5445 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5446 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5447 
   5448 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5449 		    sizeof(intrbuf));
   5450 #ifdef WM_MPSAFE
   5451 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5452 		    PCI_INTR_MPSAFE, true);
   5453 #endif
   5454 		memset(intr_xname, 0, sizeof(intr_xname));
   5455 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5456 		    device_xname(sc->sc_dev), qidx);
   5457 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5458 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5459 		if (vih == NULL) {
   5460 			aprint_error_dev(sc->sc_dev,
   5461 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5462 			    intrstr ? " at " : "",
   5463 			    intrstr ? intrstr : "");
   5464 
   5465 			goto fail;
   5466 		}
   5467 		kcpuset_zero(affinity);
   5468 		/* Round-robin affinity */
   5469 		kcpuset_set(affinity, affinity_to);
   5470 		error = interrupt_distribute(vih, affinity, NULL);
   5471 		if (error == 0) {
   5472 			aprint_normal_dev(sc->sc_dev,
   5473 			    "for TX and RX interrupting at %s affinity to %u\n",
   5474 			    intrstr, affinity_to);
   5475 		} else {
   5476 			aprint_normal_dev(sc->sc_dev,
   5477 			    "for TX and RX interrupting at %s\n", intrstr);
   5478 		}
   5479 		sc->sc_ihs[intr_idx] = vih;
   5480 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5481 			goto fail;
   5482 		txrx_established++;
   5483 		intr_idx++;
   5484 	}
   5485 
   5486 	/* LINK */
   5487 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5488 	    sizeof(intrbuf));
   5489 #ifdef WM_MPSAFE
   5490 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5491 #endif
   5492 	memset(intr_xname, 0, sizeof(intr_xname));
   5493 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5494 	    device_xname(sc->sc_dev));
   5495 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5496 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5497 	if (vih == NULL) {
   5498 		aprint_error_dev(sc->sc_dev,
   5499 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5500 		    intrstr ? " at " : "",
   5501 		    intrstr ? intrstr : "");
   5502 
   5503 		goto fail;
   5504 	}
   5505 	/* Keep default affinity to LINK interrupt */
   5506 	aprint_normal_dev(sc->sc_dev,
   5507 	    "for LINK interrupting at %s\n", intrstr);
   5508 	sc->sc_ihs[intr_idx] = vih;
   5509 	sc->sc_link_intr_idx = intr_idx;
   5510 
   5511 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5512 	kcpuset_destroy(affinity);
   5513 	return 0;
   5514 
   5515  fail:
   5516 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5517 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5518 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5519 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5520 	}
   5521 
   5522 	kcpuset_destroy(affinity);
   5523 	return ENOMEM;
   5524 }
   5525 
   5526 static void
   5527 wm_unset_stopping_flags(struct wm_softc *sc)
   5528 {
   5529 	int i;
   5530 
   5531 	KASSERT(WM_CORE_LOCKED(sc));
   5532 
   5533 	/* Must unset stopping flags in ascending order. */
   5534 	for (i = 0; i < sc->sc_nqueues; i++) {
   5535 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5536 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5537 
   5538 		mutex_enter(txq->txq_lock);
   5539 		txq->txq_stopping = false;
   5540 		mutex_exit(txq->txq_lock);
   5541 
   5542 		mutex_enter(rxq->rxq_lock);
   5543 		rxq->rxq_stopping = false;
   5544 		mutex_exit(rxq->rxq_lock);
   5545 	}
   5546 
   5547 	sc->sc_core_stopping = false;
   5548 }
   5549 
   5550 static void
   5551 wm_set_stopping_flags(struct wm_softc *sc)
   5552 {
   5553 	int i;
   5554 
   5555 	KASSERT(WM_CORE_LOCKED(sc));
   5556 
   5557 	sc->sc_core_stopping = true;
   5558 
   5559 	/* Must set stopping flags in ascending order. */
   5560 	for (i = 0; i < sc->sc_nqueues; i++) {
   5561 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5562 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5563 
   5564 		mutex_enter(rxq->rxq_lock);
   5565 		rxq->rxq_stopping = true;
   5566 		mutex_exit(rxq->rxq_lock);
   5567 
   5568 		mutex_enter(txq->txq_lock);
   5569 		txq->txq_stopping = true;
   5570 		mutex_exit(txq->txq_lock);
   5571 	}
   5572 }
   5573 
   5574 /*
    5575  * Write the interrupt interval value to the ITR or EITR register.
   5576  */
   5577 static void
   5578 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5579 {
   5580 
   5581 	if (!wmq->wmq_set_itr)
   5582 		return;
   5583 
   5584 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5585 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5586 
   5587 		/*
    5588 		 * The 82575 doesn't have the CNT_INGR field,
    5589 		 * so overwrite the counter field by software.
   5590 		 */
   5591 		if (sc->sc_type == WM_T_82575)
   5592 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5593 		else
   5594 			eitr |= EITR_CNT_INGR;
   5595 
   5596 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5597 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5598 		/*
    5599 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5600 		 * the multiqueue function with MSI-X.
   5601 		 */
   5602 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5603 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5604 	} else {
   5605 		KASSERT(wmq->wmq_id == 0);
   5606 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5607 	}
   5608 
   5609 	wmq->wmq_set_itr = false;
   5610 }
   5611 
   5612 /*
   5613  * TODO
    5614  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5615  * however, it does not fit wm(4), so AIM stays disabled until we find
    5616  * an appropriate ITR calculation.
   5617  */
   5618 /*
    5619  * Calculate the interrupt interval value that will be written by
    5620  * wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5621  */
   5622 static void
   5623 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5624 {
   5625 #ifdef NOTYET
   5626 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5627 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5628 	uint32_t avg_size = 0;
   5629 	uint32_t new_itr;
   5630 
   5631 	if (rxq->rxq_packets)
    5632 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   5633 	if (txq->txq_packets)
   5634 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5635 
   5636 	if (avg_size == 0) {
   5637 		new_itr = 450; /* restore default value */
   5638 		goto out;
   5639 	}
   5640 
   5641 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5642 	avg_size += 24;
   5643 
   5644 	/* Don't starve jumbo frames */
   5645 	avg_size = uimin(avg_size, 3000);
   5646 
   5647 	/* Give a little boost to mid-size frames */
   5648 	if ((avg_size > 300) && (avg_size < 1200))
   5649 		new_itr = avg_size / 3;
   5650 	else
   5651 		new_itr = avg_size / 2;
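         	/*
         	 * For example, if the average frame size works out to 1500,
         	 * it becomes 1524 after the +24 adjustment, so new_itr = 762,
         	 * quadrupled below to 3048 on everything except the 82575.
         	 */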
   5652 
   5653 out:
   5654 	/*
    5655 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   5656 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5657 	 */
   5658 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5659 		new_itr *= 4;
   5660 
   5661 	if (new_itr != wmq->wmq_itr) {
   5662 		wmq->wmq_itr = new_itr;
   5663 		wmq->wmq_set_itr = true;
   5664 	} else
   5665 		wmq->wmq_set_itr = false;
   5666 
   5667 	rxq->rxq_packets = 0;
   5668 	rxq->rxq_bytes = 0;
   5669 	txq->txq_packets = 0;
   5670 	txq->txq_bytes = 0;
   5671 #endif
   5672 }
   5673 
   5674 /*
   5675  * wm_init:		[ifnet interface function]
   5676  *
   5677  *	Initialize the interface.
   5678  */
   5679 static int
   5680 wm_init(struct ifnet *ifp)
   5681 {
   5682 	struct wm_softc *sc = ifp->if_softc;
   5683 	int ret;
   5684 
   5685 	WM_CORE_LOCK(sc);
   5686 	ret = wm_init_locked(ifp);
   5687 	WM_CORE_UNLOCK(sc);
   5688 
   5689 	return ret;
   5690 }
   5691 
   5692 static int
   5693 wm_init_locked(struct ifnet *ifp)
   5694 {
   5695 	struct wm_softc *sc = ifp->if_softc;
   5696 	struct ethercom *ec = &sc->sc_ethercom;
   5697 	int i, j, trynum, error = 0;
   5698 	uint32_t reg;
   5699 
   5700 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5701 		device_xname(sc->sc_dev), __func__));
   5702 	KASSERT(WM_CORE_LOCKED(sc));
   5703 
   5704 	/*
    5705 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5706 	 * There is a small but measurable benefit to avoiding the adjustment
   5707 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5708 	 * on such platforms.  One possibility is that the DMA itself is
   5709 	 * slightly more efficient if the front of the entire packet (instead
   5710 	 * of the front of the headers) is aligned.
   5711 	 *
   5712 	 * Note we must always set align_tweak to 0 if we are using
   5713 	 * jumbo frames.
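         	 *
         	 * (With the standard 14-byte Ethernet header, the 2-byte tweak
         	 * lands the following IP header on a 4-byte boundary.)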
   5714 	 */
   5715 #ifdef __NO_STRICT_ALIGNMENT
   5716 	sc->sc_align_tweak = 0;
   5717 #else
   5718 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5719 		sc->sc_align_tweak = 0;
   5720 	else
   5721 		sc->sc_align_tweak = 2;
   5722 #endif /* __NO_STRICT_ALIGNMENT */
   5723 
   5724 	/* Cancel any pending I/O. */
   5725 	wm_stop_locked(ifp, 0);
   5726 
   5727 	/* Update statistics before reset */
   5728 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5729 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5730 
   5731 	/* PCH_SPT hardware workaround */
   5732 	if (sc->sc_type == WM_T_PCH_SPT)
   5733 		wm_flush_desc_rings(sc);
   5734 
   5735 	/* Reset the chip to a known state. */
   5736 	wm_reset(sc);
   5737 
   5738 	/*
   5739 	 * AMT based hardware can now take control from firmware
   5740 	 * Do this after reset.
   5741 	 */
   5742 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5743 		wm_get_hw_control(sc);
   5744 
   5745 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5746 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5747 		wm_legacy_irq_quirk_spt(sc);
   5748 
   5749 	/* Init hardware bits */
   5750 	wm_initialize_hardware_bits(sc);
   5751 
   5752 	/* Reset the PHY. */
   5753 	if (sc->sc_flags & WM_F_HAS_MII)
   5754 		wm_gmii_reset(sc);
   5755 
   5756 	if (sc->sc_type >= WM_T_ICH8) {
   5757 		reg = CSR_READ(sc, WMREG_GCR);
   5758 		/*
   5759 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5760 		 * default after reset.
   5761 		 */
   5762 		if (sc->sc_type == WM_T_ICH8)
   5763 			reg |= GCR_NO_SNOOP_ALL;
   5764 		else
   5765 			reg &= ~GCR_NO_SNOOP_ALL;
   5766 		CSR_WRITE(sc, WMREG_GCR, reg);
   5767 	}
   5768 	if ((sc->sc_type >= WM_T_ICH8)
   5769 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5770 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5771 
   5772 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5773 		reg |= CTRL_EXT_RO_DIS;
   5774 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5775 	}
   5776 
   5777 	/* Calculate (E)ITR value */
   5778 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5779 		/*
   5780 		 * For NEWQUEUE's EITR (except for 82575).
    5781 		 * The 82575's EITR should be set to the same throttling value
    5782 		 * as other old controllers' ITR because the interrupt/sec
    5783 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5784 		 *
    5785 		 * The 82574's EITR should be set to the same value as the ITR.
    5786 		 *
    5787 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
    5788 		 * contrast to the ITR throttling value.
   5789 		 */
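         		/* E.g. 450 yields 1,000,000 / 450 ~= 2222 interrupts/sec. */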
   5790 		sc->sc_itr_init = 450;
   5791 	} else if (sc->sc_type >= WM_T_82543) {
   5792 		/*
   5793 		 * Set up the interrupt throttling register (units of 256ns)
   5794 		 * Note that a footnote in Intel's documentation says this
   5795 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5796 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5797 		 * that that is also true for the 1024ns units of the other
   5798 		 * interrupt-related timer registers -- so, really, we ought
   5799 		 * to divide this value by 4 when the link speed is low.
   5800 		 *
   5801 		 * XXX implement this division at link speed change!
   5802 		 */
   5803 
   5804 		/*
   5805 		 * For N interrupts/sec, set this value to:
   5806 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5807 		 * absolute and packet timer values to this value
   5808 		 * divided by 4 to get "simple timer" behavior.
   5809 		 */
   5810 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5811 	}
   5812 
   5813 	error = wm_init_txrx_queues(sc);
   5814 	if (error)
   5815 		goto out;
   5816 
   5817 	/* Clear out the VLAN table -- we don't use it (yet). */
   5818 	CSR_WRITE(sc, WMREG_VET, 0);
   5819 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5820 		trynum = 10; /* Due to hw errata */
   5821 	else
   5822 		trynum = 1;
   5823 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5824 		for (j = 0; j < trynum; j++)
   5825 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5826 
   5827 	/*
   5828 	 * Set up flow-control parameters.
   5829 	 *
   5830 	 * XXX Values could probably stand some tuning.
   5831 	 */
   5832 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5833 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5834 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5835 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5836 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5837 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5838 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5839 	}
   5840 
   5841 	sc->sc_fcrtl = FCRTL_DFLT;
   5842 	if (sc->sc_type < WM_T_82543) {
   5843 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5844 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5845 	} else {
   5846 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5847 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5848 	}
   5849 
   5850 	if (sc->sc_type == WM_T_80003)
   5851 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5852 	else
   5853 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5854 
   5855 	/* Writes the control register. */
   5856 	wm_set_vlan(sc);
   5857 
   5858 	if (sc->sc_flags & WM_F_HAS_MII) {
   5859 		uint16_t kmreg;
   5860 
   5861 		switch (sc->sc_type) {
   5862 		case WM_T_80003:
   5863 		case WM_T_ICH8:
   5864 		case WM_T_ICH9:
   5865 		case WM_T_ICH10:
   5866 		case WM_T_PCH:
   5867 		case WM_T_PCH2:
   5868 		case WM_T_PCH_LPT:
   5869 		case WM_T_PCH_SPT:
   5870 		case WM_T_PCH_CNP:
   5871 			/*
    5872 			 * Set the MAC to wait the maximum time between each
    5873 			 * iteration and increase the max iterations when
    5874 			 * polling the PHY; this fixes erroneous timeouts at
   5875 			 * 10Mbps.
   5876 			 */
   5877 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5878 			    0xFFFF);
   5879 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5880 			    &kmreg);
   5881 			kmreg |= 0x3F;
   5882 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5883 			    kmreg);
   5884 			break;
   5885 		default:
   5886 			break;
   5887 		}
   5888 
   5889 		if (sc->sc_type == WM_T_80003) {
   5890 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5891 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5892 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5893 
   5894 			/* Bypass RX and TX FIFO's */
   5895 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5896 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5897 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5898 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5899 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5900 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5901 		}
   5902 	}
   5903 #if 0
   5904 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5905 #endif
   5906 
   5907 	/* Set up checksum offload parameters. */
   5908 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5909 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5910 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5911 		reg |= RXCSUM_IPOFL;
   5912 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5913 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5914 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5915 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5916 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5917 
   5918 	/* Set registers about MSI-X */
   5919 	if (wm_is_using_msix(sc)) {
   5920 		uint32_t ivar, qintr_idx;
   5921 		struct wm_queue *wmq;
   5922 		unsigned int qid;
   5923 
   5924 		if (sc->sc_type == WM_T_82575) {
   5925 			/* Interrupt control */
   5926 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5927 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5928 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5929 
   5930 			/* TX and RX */
   5931 			for (i = 0; i < sc->sc_nqueues; i++) {
   5932 				wmq = &sc->sc_queue[i];
   5933 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5934 				    EITR_TX_QUEUE(wmq->wmq_id)
   5935 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5936 			}
   5937 			/* Link status */
   5938 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5939 			    EITR_OTHER);
   5940 		} else if (sc->sc_type == WM_T_82574) {
   5941 			/* Interrupt control */
   5942 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5943 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5944 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5945 
   5946 			/*
    5947 			 * Work around an issue with spurious interrupts
    5948 			 * in MSI-X mode.
    5949 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    5950 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   5951 			 */
   5952 			reg = CSR_READ(sc, WMREG_RFCTL);
   5953 			reg |= WMREG_RFCTL_ACKDIS;
   5954 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5955 
   5956 			ivar = 0;
   5957 			/* TX and RX */
   5958 			for (i = 0; i < sc->sc_nqueues; i++) {
   5959 				wmq = &sc->sc_queue[i];
   5960 				qid = wmq->wmq_id;
   5961 				qintr_idx = wmq->wmq_intr_idx;
   5962 
   5963 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5964 				    IVAR_TX_MASK_Q_82574(qid));
   5965 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5966 				    IVAR_RX_MASK_Q_82574(qid));
   5967 			}
   5968 			/* Link status */
   5969 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5970 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5971 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5972 		} else {
   5973 			/* Interrupt control */
   5974 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5975 			    | GPIE_EIAME | GPIE_PBA);
   5976 
   5977 			switch (sc->sc_type) {
   5978 			case WM_T_82580:
   5979 			case WM_T_I350:
   5980 			case WM_T_I354:
   5981 			case WM_T_I210:
   5982 			case WM_T_I211:
   5983 				/* TX and RX */
   5984 				for (i = 0; i < sc->sc_nqueues; i++) {
   5985 					wmq = &sc->sc_queue[i];
   5986 					qid = wmq->wmq_id;
   5987 					qintr_idx = wmq->wmq_intr_idx;
   5988 
   5989 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5990 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5991 					ivar |= __SHIFTIN((qintr_idx
   5992 						| IVAR_VALID),
   5993 					    IVAR_TX_MASK_Q(qid));
   5994 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5995 					ivar |= __SHIFTIN((qintr_idx
   5996 						| IVAR_VALID),
   5997 					    IVAR_RX_MASK_Q(qid));
   5998 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5999 				}
   6000 				break;
   6001 			case WM_T_82576:
   6002 				/* TX and RX */
   6003 				for (i = 0; i < sc->sc_nqueues; i++) {
   6004 					wmq = &sc->sc_queue[i];
   6005 					qid = wmq->wmq_id;
   6006 					qintr_idx = wmq->wmq_intr_idx;
   6007 
   6008 					ivar = CSR_READ(sc,
   6009 					    WMREG_IVAR_Q_82576(qid));
   6010 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6011 					ivar |= __SHIFTIN((qintr_idx
   6012 						| IVAR_VALID),
   6013 					    IVAR_TX_MASK_Q_82576(qid));
   6014 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6015 					ivar |= __SHIFTIN((qintr_idx
   6016 						| IVAR_VALID),
   6017 					    IVAR_RX_MASK_Q_82576(qid));
   6018 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6019 					    ivar);
   6020 				}
   6021 				break;
   6022 			default:
   6023 				break;
   6024 			}
   6025 
   6026 			/* Link status */
   6027 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6028 			    IVAR_MISC_OTHER);
   6029 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6030 		}
   6031 
   6032 		if (wm_is_using_multiqueue(sc)) {
   6033 			wm_init_rss(sc);
   6034 
   6035 			/*
    6036 			 * NOTE: Receive full-packet checksum offload is
    6037 			 * mutually exclusive with multiqueue. However, this
    6038 			 * is not the same as the TCP/IP checksum offloads,
    6039 			 * which still work.
    6040 			 */
   6041 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6042 			reg |= RXCSUM_PCSD;
   6043 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6044 		}
   6045 	}
   6046 
   6047 	/* Set up the interrupt registers. */
   6048 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6049 
   6050 	if (wm_is_using_msix(sc)) {
   6051 		uint32_t mask;
   6052 		struct wm_queue *wmq;
   6053 
   6054 		switch (sc->sc_type) {
   6055 		case WM_T_82574:
   6056 			mask = 0;
   6057 			for (i = 0; i < sc->sc_nqueues; i++) {
   6058 				wmq = &sc->sc_queue[i];
   6059 				mask |= ICR_TXQ(wmq->wmq_id);
   6060 				mask |= ICR_RXQ(wmq->wmq_id);
   6061 			}
   6062 			mask |= ICR_OTHER;
   6063 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6064 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6065 			break;
   6066 		default:
   6067 			if (sc->sc_type == WM_T_82575) {
   6068 				mask = 0;
   6069 				for (i = 0; i < sc->sc_nqueues; i++) {
   6070 					wmq = &sc->sc_queue[i];
   6071 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6072 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6073 				}
   6074 				mask |= EITR_OTHER;
   6075 			} else {
   6076 				mask = 0;
   6077 				for (i = 0; i < sc->sc_nqueues; i++) {
   6078 					wmq = &sc->sc_queue[i];
   6079 					mask |= 1 << wmq->wmq_intr_idx;
   6080 				}
   6081 				mask |= 1 << sc->sc_link_intr_idx;
   6082 			}
   6083 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6084 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6085 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6086 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6087 			break;
   6088 		}
   6089 	} else {
   6090 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6091 		    ICR_RXO | ICR_RXT0;
   6092 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6093 	}
   6094 
   6095 	/* Set up the inter-packet gap. */
   6096 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6097 
   6098 	if (sc->sc_type >= WM_T_82543) {
   6099 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6100 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6101 			wm_itrs_writereg(sc, wmq);
   6102 		}
   6103 		/*
    6104 		 * Link interrupts occur much less often than TX
    6105 		 * and RX interrupts, so we don't tune the
    6106 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    6107 		 * FreeBSD's if_igb does.
   6108 		 */
   6109 	}
   6110 
   6111 	/* Set the VLAN ethernetype. */
   6112 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6113 
   6114 	/*
   6115 	 * Set up the transmit control register; we start out with
    6116 	 * a collision distance suitable for FDX, but update it when
   6117 	 * we resolve the media type.
   6118 	 */
   6119 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6120 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6121 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6122 	if (sc->sc_type >= WM_T_82571)
   6123 		sc->sc_tctl |= TCTL_MULR;
   6124 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6125 
   6126 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6127 		/* Write TDT after TCTL.EN is set. See the document. */
   6128 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6129 	}
   6130 
   6131 	if (sc->sc_type == WM_T_80003) {
   6132 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6133 		reg &= ~TCTL_EXT_GCEX_MASK;
   6134 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6135 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6136 	}
   6137 
   6138 	/* Set the media. */
   6139 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6140 		goto out;
   6141 
   6142 	/* Configure for OS presence */
   6143 	wm_init_manageability(sc);
   6144 
   6145 	/*
   6146 	 * Set up the receive control register; we actually program the
   6147 	 * register when we set the receive filter. Use multicast address
   6148 	 * offset type 0.
   6149 	 *
   6150 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6151 	 * don't enable that feature.
   6152 	 */
   6153 	sc->sc_mchash_type = 0;
   6154 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6155 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6156 
    6157 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6158 	if (sc->sc_type == WM_T_82574)
   6159 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6160 
   6161 	/*
   6162 	 * The I350 has a bug where it always strips the CRC whether
    6163 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   6164 	 */
   6165 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6166 	    || (sc->sc_type == WM_T_I210))
   6167 		sc->sc_rctl |= RCTL_SECRC;
   6168 
   6169 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6170 	    && (ifp->if_mtu > ETHERMTU)) {
   6171 		sc->sc_rctl |= RCTL_LPE;
   6172 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6173 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6174 	}
   6175 
   6176 	if (MCLBYTES == 2048)
   6177 		sc->sc_rctl |= RCTL_2k;
   6178 	else {
   6179 		if (sc->sc_type >= WM_T_82543) {
   6180 			switch (MCLBYTES) {
   6181 			case 4096:
   6182 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6183 				break;
   6184 			case 8192:
   6185 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6186 				break;
   6187 			case 16384:
   6188 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6189 				break;
   6190 			default:
   6191 				panic("wm_init: MCLBYTES %d unsupported",
   6192 				    MCLBYTES);
   6193 				break;
   6194 			}
   6195 		} else
   6196 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6197 	}
   6198 
   6199 	/* Enable ECC */
   6200 	switch (sc->sc_type) {
   6201 	case WM_T_82571:
   6202 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6203 		reg |= PBA_ECC_CORR_EN;
   6204 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6205 		break;
   6206 	case WM_T_PCH_LPT:
   6207 	case WM_T_PCH_SPT:
   6208 	case WM_T_PCH_CNP:
   6209 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6210 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6211 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6212 
   6213 		sc->sc_ctrl |= CTRL_MEHE;
   6214 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6215 		break;
   6216 	default:
   6217 		break;
   6218 	}
   6219 
   6220 	/*
   6221 	 * Set the receive filter.
   6222 	 *
   6223 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6224 	 * the setting of RCTL.EN in wm_set_filter()
   6225 	 */
   6226 	wm_set_filter(sc);
   6227 
    6228 	/* On 82575 and later, set RDT only if RX is enabled. */
   6229 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6230 		int qidx;
   6231 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6232 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6233 			for (i = 0; i < WM_NRXDESC; i++) {
   6234 				mutex_enter(rxq->rxq_lock);
   6235 				wm_init_rxdesc(rxq, i);
   6236 				mutex_exit(rxq->rxq_lock);
   6238 			}
   6239 		}
   6240 	}
   6241 
   6242 	wm_unset_stopping_flags(sc);
   6243 
   6244 	/* Start the one second link check clock. */
   6245 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6246 
   6247 	/* ...all done! */
   6248 	ifp->if_flags |= IFF_RUNNING;
   6249 	ifp->if_flags &= ~IFF_OACTIVE;
   6250 
   6251  out:
   6252 	/* Save last flags for the callback */
   6253 	sc->sc_if_flags = ifp->if_flags;
   6254 	sc->sc_ec_capenable = ec->ec_capenable;
   6255 	if (error)
   6256 		log(LOG_ERR, "%s: interface not running\n",
   6257 		    device_xname(sc->sc_dev));
   6258 	return error;
   6259 }
   6260 
   6261 /*
   6262  * wm_stop:		[ifnet interface function]
   6263  *
   6264  *	Stop transmission on the interface.
   6265  */
   6266 static void
   6267 wm_stop(struct ifnet *ifp, int disable)
   6268 {
   6269 	struct wm_softc *sc = ifp->if_softc;
   6270 
   6271 	WM_CORE_LOCK(sc);
   6272 	wm_stop_locked(ifp, disable);
   6273 	WM_CORE_UNLOCK(sc);
   6274 }
   6275 
   6276 static void
   6277 wm_stop_locked(struct ifnet *ifp, int disable)
   6278 {
   6279 	struct wm_softc *sc = ifp->if_softc;
   6280 	struct wm_txsoft *txs;
   6281 	int i, qidx;
   6282 
   6283 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6284 		device_xname(sc->sc_dev), __func__));
   6285 	KASSERT(WM_CORE_LOCKED(sc));
   6286 
   6287 	wm_set_stopping_flags(sc);
   6288 
   6289 	/* Stop the one second clock. */
   6290 	callout_stop(&sc->sc_tick_ch);
   6291 
   6292 	/* Stop the 82547 Tx FIFO stall check timer. */
   6293 	if (sc->sc_type == WM_T_82547)
   6294 		callout_stop(&sc->sc_txfifo_ch);
   6295 
   6296 	if (sc->sc_flags & WM_F_HAS_MII) {
   6297 		/* Down the MII. */
   6298 		mii_down(&sc->sc_mii);
   6299 	} else {
   6300 #if 0
   6301 		/* Should we clear PHY's status properly? */
   6302 		wm_reset(sc);
   6303 #endif
   6304 	}
   6305 
   6306 	/* Stop the transmit and receive processes. */
   6307 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6308 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6309 	sc->sc_rctl &= ~RCTL_EN;
   6310 
   6311 	/*
   6312 	 * Clear the interrupt mask to ensure the device cannot assert its
   6313 	 * interrupt line.
   6314 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6315 	 * service any currently pending or shared interrupt.
   6316 	 */
   6317 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6318 	sc->sc_icr = 0;
   6319 	if (wm_is_using_msix(sc)) {
   6320 		if (sc->sc_type != WM_T_82574) {
   6321 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6322 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6323 		} else
   6324 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6325 	}
   6326 
   6327 	/* Release any queued transmit buffers. */
   6328 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6329 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6330 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6331 		mutex_enter(txq->txq_lock);
   6332 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6333 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6334 			txs = &txq->txq_soft[i];
   6335 			if (txs->txs_mbuf != NULL) {
    6336 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6337 				m_freem(txs->txs_mbuf);
   6338 				txs->txs_mbuf = NULL;
   6339 			}
   6340 		}
   6341 		mutex_exit(txq->txq_lock);
   6342 	}
   6343 
   6344 	/* Mark the interface as down and cancel the watchdog timer. */
   6345 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6346 
   6347 	if (disable) {
   6348 		for (i = 0; i < sc->sc_nqueues; i++) {
   6349 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6350 			mutex_enter(rxq->rxq_lock);
   6351 			wm_rxdrain(rxq);
   6352 			mutex_exit(rxq->rxq_lock);
   6353 		}
   6354 	}
   6355 
   6356 #if 0 /* notyet */
   6357 	if (sc->sc_type >= WM_T_82544)
   6358 		CSR_WRITE(sc, WMREG_WUC, 0);
   6359 #endif
   6360 }
   6361 
   6362 static void
   6363 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6364 {
   6365 	struct mbuf *m;
   6366 	int i;
   6367 
   6368 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6369 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6370 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6371 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6372 		    m->m_data, m->m_len, m->m_flags);
   6373 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6374 	    i, i == 1 ? "" : "s");
   6375 }
   6376 
   6377 /*
   6378  * wm_82547_txfifo_stall:
   6379  *
   6380  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6381  *	reset the FIFO pointers, and restart packet transmission.
   6382  */
   6383 static void
   6384 wm_82547_txfifo_stall(void *arg)
   6385 {
   6386 	struct wm_softc *sc = arg;
   6387 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6388 
   6389 	mutex_enter(txq->txq_lock);
   6390 
   6391 	if (txq->txq_stopping)
   6392 		goto out;
   6393 
   6394 	if (txq->txq_fifo_stall) {
   6395 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6396 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6397 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6398 			/*
   6399 			 * Packets have drained.  Stop transmitter, reset
   6400 			 * FIFO pointers, restart transmitter, and kick
   6401 			 * the packet queue.
   6402 			 */
   6403 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6404 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6405 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6406 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6407 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6408 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6409 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6410 			CSR_WRITE_FLUSH(sc);
   6411 
   6412 			txq->txq_fifo_head = 0;
   6413 			txq->txq_fifo_stall = 0;
   6414 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6415 		} else {
   6416 			/*
   6417 			 * Still waiting for packets to drain; try again in
   6418 			 * another tick.
   6419 			 */
   6420 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6421 		}
   6422 	}
   6423 
   6424 out:
   6425 	mutex_exit(txq->txq_lock);
   6426 }
   6427 
   6428 /*
   6429  * wm_82547_txfifo_bugchk:
   6430  *
   6431  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6432  *	prevent enqueueing a packet that would wrap around the end
    6433  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6434  *
   6435  *	We do this by checking the amount of space before the end
   6436  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6437  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6438  *	the internal FIFO pointers to the beginning, and restart
   6439  *	transmission on the interface.
   6440  */
   6441 #define	WM_FIFO_HDR		0x10
   6442 #define	WM_82547_PAD_LEN	0x3e0
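         /*
          * Worked example (hypothetical numbers): with txq_fifo_size =
          * 0x2000 and txq_fifo_head = 0x1f00, space = 0x100 bytes.  A
          * 1514-byte frame yields len = roundup(1514 + WM_FIFO_HDR,
          * WM_FIFO_HDR) = 1536, and 1536 >= WM_82547_PAD_LEN + 0x100
          * (= 1248), so wm_82547_txfifo_bugchk() below would stall the
          * FIFO rather than let the packet wrap.
          */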
   6443 static int
   6444 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6445 {
   6446 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6447 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6448 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6449 
   6450 	/* Just return if already stalled. */
   6451 	if (txq->txq_fifo_stall)
   6452 		return 1;
   6453 
   6454 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6455 		/* Stall only occurs in half-duplex mode. */
   6456 		goto send_packet;
   6457 	}
   6458 
   6459 	if (len >= WM_82547_PAD_LEN + space) {
   6460 		txq->txq_fifo_stall = 1;
   6461 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6462 		return 1;
   6463 	}
   6464 
   6465  send_packet:
   6466 	txq->txq_fifo_head += len;
   6467 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6468 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6469 
   6470 	return 0;
   6471 }
   6472 
   6473 static int
   6474 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6475 {
   6476 	int error;
   6477 
   6478 	/*
   6479 	 * Allocate the control data structures, and create and load the
   6480 	 * DMA map for it.
   6481 	 *
   6482 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6483 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6484 	 * both sets within the same 4G segment.
   6485 	 */
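         	/*
         	 * The 4G constraint is enforced below via the boundary
         	 * argument (0x100000000ULL) to bus_dmamem_alloc(): bus_dma(9)
         	 * guarantees that an allocated segment never crosses a
         	 * boundary-aligned address.
         	 */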
   6486 	if (sc->sc_type < WM_T_82544)
   6487 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6488 	else
   6489 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6490 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6491 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6492 	else
   6493 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6494 
   6495 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6496 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6497 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6498 		aprint_error_dev(sc->sc_dev,
   6499 		    "unable to allocate TX control data, error = %d\n",
   6500 		    error);
   6501 		goto fail_0;
   6502 	}
   6503 
   6504 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6505 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6506 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6507 		aprint_error_dev(sc->sc_dev,
   6508 		    "unable to map TX control data, error = %d\n", error);
   6509 		goto fail_1;
   6510 	}
   6511 
   6512 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6513 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6514 		aprint_error_dev(sc->sc_dev,
   6515 		    "unable to create TX control data DMA map, error = %d\n",
   6516 		    error);
   6517 		goto fail_2;
   6518 	}
   6519 
   6520 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6521 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6522 		aprint_error_dev(sc->sc_dev,
   6523 		    "unable to load TX control data DMA map, error = %d\n",
   6524 		    error);
   6525 		goto fail_3;
   6526 	}
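         #if 0
         	/*
         	 * Minimal sanity-check sketch (disabled, like the other
         	 * #if 0 blocks in this file) of the 4G constraint above:
         	 * the first and last bytes of the descriptor area must
         	 * share the same upper 32 address bits.
         	 */
         	KASSERT(((uint64_t)txq->txq_desc_dmamap->dm_segs[0].ds_addr
         		>> 32) ==
         	    (((uint64_t)txq->txq_desc_dmamap->dm_segs[0].ds_addr
         		+ WM_TXDESCS_SIZE(txq) - 1) >> 32));
         #endif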
   6527 
   6528 	return 0;
   6529 
   6530  fail_3:
   6531 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6532  fail_2:
   6533 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6534 	    WM_TXDESCS_SIZE(txq));
   6535  fail_1:
   6536 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6537  fail_0:
   6538 	return error;
   6539 }
   6540 
   6541 static void
   6542 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6543 {
   6544 
   6545 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6546 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6547 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6548 	    WM_TXDESCS_SIZE(txq));
   6549 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6550 }
   6551 
   6552 static int
   6553 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6554 {
   6555 	int error;
   6556 	size_t rxq_descs_size;
   6557 
   6558 	/*
   6559 	 * Allocate the control data structures, and create and load the
   6560 	 * DMA map for it.
   6561 	 *
   6562 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6563 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6564 	 * both sets within the same 4G segment.
   6565 	 */
   6566 	rxq->rxq_ndesc = WM_NRXDESC;
   6567 	if (sc->sc_type == WM_T_82574)
   6568 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6569 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6570 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6571 	else
   6572 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6573 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6574 
   6575 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6576 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6577 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6578 		aprint_error_dev(sc->sc_dev,
   6579 		    "unable to allocate RX control data, error = %d\n",
   6580 		    error);
   6581 		goto fail_0;
   6582 	}
   6583 
   6584 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6585 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6586 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6587 		aprint_error_dev(sc->sc_dev,
   6588 		    "unable to map RX control data, error = %d\n", error);
   6589 		goto fail_1;
   6590 	}
   6591 
   6592 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6593 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6594 		aprint_error_dev(sc->sc_dev,
   6595 		    "unable to create RX control data DMA map, error = %d\n",
   6596 		    error);
   6597 		goto fail_2;
   6598 	}
   6599 
   6600 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6601 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6602 		aprint_error_dev(sc->sc_dev,
   6603 		    "unable to load RX control data DMA map, error = %d\n",
   6604 		    error);
   6605 		goto fail_3;
   6606 	}
   6607 
   6608 	return 0;
   6609 
   6610  fail_3:
   6611 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6612  fail_2:
   6613 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6614 	    rxq_descs_size);
   6615  fail_1:
   6616 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6617  fail_0:
   6618 	return error;
   6619 }
   6620 
   6621 static void
   6622 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6623 {
   6624 
   6625 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6626 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6627 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6628 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6629 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6630 }
   6631 
   6632 
   6633 static int
   6634 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6635 {
   6636 	int i, error;
   6637 
   6638 	/* Create the transmit buffer DMA maps. */
   6639 	WM_TXQUEUELEN(txq) =
   6640 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6641 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6642 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6643 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6644 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6645 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6646 			aprint_error_dev(sc->sc_dev,
   6647 			    "unable to create Tx DMA map %d, error = %d\n",
   6648 			    i, error);
   6649 			goto fail;
   6650 		}
   6651 	}
   6652 
   6653 	return 0;
   6654 
   6655  fail:
   6656 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6657 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6658 			bus_dmamap_destroy(sc->sc_dmat,
   6659 			    txq->txq_soft[i].txs_dmamap);
   6660 	}
   6661 	return error;
   6662 }
   6663 
   6664 static void
   6665 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6666 {
   6667 	int i;
   6668 
   6669 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6670 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6671 			bus_dmamap_destroy(sc->sc_dmat,
   6672 			    txq->txq_soft[i].txs_dmamap);
   6673 	}
   6674 }
   6675 
   6676 static int
   6677 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6678 {
   6679 	int i, error;
   6680 
   6681 	/* Create the receive buffer DMA maps. */
   6682 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6683 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6684 			    MCLBYTES, 0, 0,
   6685 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6686 			aprint_error_dev(sc->sc_dev,
   6687 			    "unable to create Rx DMA map %d error = %d\n",
   6688 			    i, error);
   6689 			goto fail;
   6690 		}
   6691 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6692 	}
   6693 
   6694 	return 0;
   6695 
   6696  fail:
   6697 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6698 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6699 			bus_dmamap_destroy(sc->sc_dmat,
   6700 			    rxq->rxq_soft[i].rxs_dmamap);
   6701 	}
   6702 	return error;
   6703 }
   6704 
   6705 static void
   6706 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6707 {
   6708 	int i;
   6709 
   6710 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6711 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6712 			bus_dmamap_destroy(sc->sc_dmat,
   6713 			    rxq->rxq_soft[i].rxs_dmamap);
   6714 	}
   6715 }
   6716 
   6717 /*
    6718  * wm_alloc_txrx_queues:
    6719  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6720  */
   6721 static int
   6722 wm_alloc_txrx_queues(struct wm_softc *sc)
   6723 {
   6724 	int i, error, tx_done, rx_done;
   6725 
   6726 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6727 	    KM_SLEEP);
   6728 	if (sc->sc_queue == NULL) {
    6729 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6730 		error = ENOMEM;
   6731 		goto fail_0;
   6732 	}
   6733 
   6734 	/* For transmission */
   6735 	error = 0;
   6736 	tx_done = 0;
   6737 	for (i = 0; i < sc->sc_nqueues; i++) {
   6738 #ifdef WM_EVENT_COUNTERS
   6739 		int j;
   6740 		const char *xname;
   6741 #endif
   6742 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6743 		txq->txq_sc = sc;
   6744 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6745 
   6746 		error = wm_alloc_tx_descs(sc, txq);
   6747 		if (error)
   6748 			break;
   6749 		error = wm_alloc_tx_buffer(sc, txq);
   6750 		if (error) {
   6751 			wm_free_tx_descs(sc, txq);
   6752 			break;
   6753 		}
   6754 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6755 		if (txq->txq_interq == NULL) {
   6756 			wm_free_tx_descs(sc, txq);
   6757 			wm_free_tx_buffer(sc, txq);
   6758 			error = ENOMEM;
   6759 			break;
   6760 		}
   6761 
   6762 #ifdef WM_EVENT_COUNTERS
   6763 		xname = device_xname(sc->sc_dev);
   6764 
   6765 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6766 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6767 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6768 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6769 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6770 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6771 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6772 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6773 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6774 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6775 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6776 
   6777 		for (j = 0; j < WM_NTXSEGS; j++) {
   6778 			snprintf(txq->txq_txseg_evcnt_names[j],
   6779 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6780 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6781 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6782 		}
   6783 
   6784 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6785 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6786 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6787 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6788 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6789 #endif /* WM_EVENT_COUNTERS */
   6790 
   6791 		tx_done++;
   6792 	}
   6793 	if (error)
   6794 		goto fail_1;
   6795 
   6796 	/* For receive */
   6797 	error = 0;
   6798 	rx_done = 0;
   6799 	for (i = 0; i < sc->sc_nqueues; i++) {
   6800 #ifdef WM_EVENT_COUNTERS
   6801 		const char *xname;
   6802 #endif
   6803 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6804 		rxq->rxq_sc = sc;
   6805 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6806 
   6807 		error = wm_alloc_rx_descs(sc, rxq);
   6808 		if (error)
   6809 			break;
   6810 
   6811 		error = wm_alloc_rx_buffer(sc, rxq);
   6812 		if (error) {
   6813 			wm_free_rx_descs(sc, rxq);
   6814 			break;
   6815 		}
   6816 
   6817 #ifdef WM_EVENT_COUNTERS
   6818 		xname = device_xname(sc->sc_dev);
   6819 
   6820 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6821 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6822 
   6823 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6824 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6825 #endif /* WM_EVENT_COUNTERS */
   6826 
   6827 		rx_done++;
   6828 	}
   6829 	if (error)
   6830 		goto fail_2;
   6831 
   6832 	for (i = 0; i < sc->sc_nqueues; i++) {
   6833 		char rndname[16];
   6834 
   6835 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   6836 		    device_xname(sc->sc_dev), i);
   6837 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   6838 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   6839 	}
   6840 
   6841 	return 0;
   6842 
   6843  fail_2:
   6844 	for (i = 0; i < rx_done; i++) {
   6845 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6846 		wm_free_rx_buffer(sc, rxq);
   6847 		wm_free_rx_descs(sc, rxq);
   6848 		if (rxq->rxq_lock)
   6849 			mutex_obj_free(rxq->rxq_lock);
   6850 	}
   6851  fail_1:
   6852 	for (i = 0; i < tx_done; i++) {
   6853 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6854 		pcq_destroy(txq->txq_interq);
   6855 		wm_free_tx_buffer(sc, txq);
   6856 		wm_free_tx_descs(sc, txq);
   6857 		if (txq->txq_lock)
   6858 			mutex_obj_free(txq->txq_lock);
   6859 	}
   6860 
   6861 	kmem_free(sc->sc_queue,
   6862 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6863  fail_0:
   6864 	return error;
   6865 }
   6866 
   6867 /*
    6868  * wm_free_txrx_queues:
    6869  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6870  */
   6871 static void
   6872 wm_free_txrx_queues(struct wm_softc *sc)
   6873 {
   6874 	int i;
   6875 
   6876 	for (i = 0; i < sc->sc_nqueues; i++)
   6877 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   6878 
   6879 	for (i = 0; i < sc->sc_nqueues; i++) {
   6880 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6881 
   6882 #ifdef WM_EVENT_COUNTERS
   6883 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6884 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6885 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6886 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6887 #endif /* WM_EVENT_COUNTERS */
   6888 
   6889 		wm_free_rx_buffer(sc, rxq);
   6890 		wm_free_rx_descs(sc, rxq);
   6891 		if (rxq->rxq_lock)
   6892 			mutex_obj_free(rxq->rxq_lock);
   6893 	}
   6894 
   6895 	for (i = 0; i < sc->sc_nqueues; i++) {
   6896 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6897 		struct mbuf *m;
   6898 #ifdef WM_EVENT_COUNTERS
   6899 		int j;
   6900 
   6901 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6902 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6903 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6904 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6905 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6906 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6907 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6908 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6909 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6910 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6911 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6912 
   6913 		for (j = 0; j < WM_NTXSEGS; j++)
   6914 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6915 
   6916 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6917 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6918 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6919 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6920 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6921 #endif /* WM_EVENT_COUNTERS */
   6922 
   6923 		/* Drain txq_interq */
   6924 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6925 			m_freem(m);
   6926 		pcq_destroy(txq->txq_interq);
   6927 
   6928 		wm_free_tx_buffer(sc, txq);
   6929 		wm_free_tx_descs(sc, txq);
   6930 		if (txq->txq_lock)
   6931 			mutex_obj_free(txq->txq_lock);
   6932 	}
   6933 
   6934 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6935 }
   6936 
   6937 static void
   6938 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6939 {
   6940 
   6941 	KASSERT(mutex_owned(txq->txq_lock));
   6942 
   6943 	/* Initialize the transmit descriptor ring. */
   6944 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6945 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6946 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6947 	txq->txq_free = WM_NTXDESC(txq);
   6948 	txq->txq_next = 0;
   6949 }
   6950 
   6951 static void
   6952 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6953     struct wm_txqueue *txq)
   6954 {
   6955 
   6956 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6957 		device_xname(sc->sc_dev), __func__));
   6958 	KASSERT(mutex_owned(txq->txq_lock));
   6959 
   6960 	if (sc->sc_type < WM_T_82543) {
   6961 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6962 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6963 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6964 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6965 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6966 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6967 	} else {
   6968 		int qid = wmq->wmq_id;
   6969 
   6970 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6971 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6972 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6973 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6974 
   6975 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6976 			/*
   6977 			 * Don't write TDT before TCTL.EN is set.
    6978 			 * See the documentation.
   6979 			 */
   6980 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6981 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6982 			    | TXDCTL_WTHRESH(0));
   6983 		else {
   6984 			/* XXX should update with AIM? */
   6985 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6986 			if (sc->sc_type >= WM_T_82540) {
   6987 				/* Should be the same */
   6988 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6989 			}
   6990 
   6991 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6992 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6993 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6994 		}
   6995 	}
   6996 }
   6997 
   6998 static void
   6999 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7000 {
   7001 	int i;
   7002 
   7003 	KASSERT(mutex_owned(txq->txq_lock));
   7004 
   7005 	/* Initialize the transmit job descriptors. */
   7006 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7007 		txq->txq_soft[i].txs_mbuf = NULL;
   7008 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7009 	txq->txq_snext = 0;
   7010 	txq->txq_sdirty = 0;
   7011 }
   7012 
   7013 static void
   7014 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7015     struct wm_txqueue *txq)
   7016 {
   7017 
   7018 	KASSERT(mutex_owned(txq->txq_lock));
   7019 
   7020 	/*
   7021 	 * Set up some register offsets that are different between
   7022 	 * the i82542 and the i82543 and later chips.
   7023 	 */
   7024 	if (sc->sc_type < WM_T_82543)
   7025 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7026 	else
   7027 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7028 
   7029 	wm_init_tx_descs(sc, txq);
   7030 	wm_init_tx_regs(sc, wmq, txq);
   7031 	wm_init_tx_buffer(sc, txq);
   7032 
   7033 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7034 	txq->txq_sending = false;
   7035 }
   7036 
   7037 static void
   7038 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7039     struct wm_rxqueue *rxq)
   7040 {
   7041 
   7042 	KASSERT(mutex_owned(rxq->rxq_lock));
   7043 
   7044 	/*
   7045 	 * Initialize the receive descriptor and receive job
   7046 	 * descriptor rings.
   7047 	 */
   7048 	if (sc->sc_type < WM_T_82543) {
   7049 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7050 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7051 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7052 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7053 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7054 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7055 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7056 
   7057 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7058 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7059 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7060 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7061 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7062 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7063 	} else {
   7064 		int qid = wmq->wmq_id;
   7065 
   7066 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7067 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7068 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7069 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7070 
   7071 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7072 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7073 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7074 
    7075 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7076 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7077 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
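         			/*
         			 * E.g., the usual MCLBYTES = 2048 passes the
         			 * alignment check above and, assuming the
         			 * common 1 KB BSIZEPKT granularity (a shift
         			 * of 10), programs a packet buffer size of 2.
         			 */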
   7078 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7079 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7080 			    | RXDCTL_WTHRESH(1));
   7081 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7082 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7083 		} else {
   7084 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7085 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7086 			/* XXX should update with AIM? */
   7087 			CSR_WRITE(sc, WMREG_RDTR,
   7088 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7089 			/* MUST be the same */
   7090 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7091 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7092 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7093 		}
   7094 	}
   7095 }
   7096 
   7097 static int
   7098 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7099 {
   7100 	struct wm_rxsoft *rxs;
   7101 	int error, i;
   7102 
   7103 	KASSERT(mutex_owned(rxq->rxq_lock));
   7104 
   7105 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7106 		rxs = &rxq->rxq_soft[i];
   7107 		if (rxs->rxs_mbuf == NULL) {
   7108 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7109 				log(LOG_ERR, "%s: unable to allocate or map "
   7110 				    "rx buffer %d, error = %d\n",
   7111 				    device_xname(sc->sc_dev), i, error);
   7112 				/*
   7113 				 * XXX Should attempt to run with fewer receive
   7114 				 * XXX buffers instead of just failing.
   7115 				 */
   7116 				wm_rxdrain(rxq);
   7117 				return ENOMEM;
   7118 			}
   7119 		} else {
   7120 			/*
   7121 			 * For 82575 and 82576, the RX descriptors must be
   7122 			 * initialized after the setting of RCTL.EN in
   7123 			 * wm_set_filter()
   7124 			 */
   7125 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7126 				wm_init_rxdesc(rxq, i);
   7127 		}
   7128 	}
   7129 	rxq->rxq_ptr = 0;
   7130 	rxq->rxq_discard = 0;
   7131 	WM_RXCHAIN_RESET(rxq);
   7132 
   7133 	return 0;
   7134 }
   7135 
   7136 static int
   7137 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7138     struct wm_rxqueue *rxq)
   7139 {
   7140 
   7141 	KASSERT(mutex_owned(rxq->rxq_lock));
   7142 
   7143 	/*
   7144 	 * Set up some register offsets that are different between
   7145 	 * the i82542 and the i82543 and later chips.
   7146 	 */
   7147 	if (sc->sc_type < WM_T_82543)
   7148 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7149 	else
   7150 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7151 
   7152 	wm_init_rx_regs(sc, wmq, rxq);
   7153 	return wm_init_rx_buffer(sc, rxq);
   7154 }
   7155 
   7156 /*
    7157  * wm_init_txrx_queues:
    7158  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7159  */
   7160 static int
   7161 wm_init_txrx_queues(struct wm_softc *sc)
   7162 {
   7163 	int i, error = 0;
   7164 
   7165 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7166 		device_xname(sc->sc_dev), __func__));
   7167 
   7168 	for (i = 0; i < sc->sc_nqueues; i++) {
   7169 		struct wm_queue *wmq = &sc->sc_queue[i];
   7170 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7171 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7172 
   7173 		/*
   7174 		 * TODO
    7175 		 * Currently, a constant value is used instead of AIM.
    7176 		 * Furthermore, the interrupt interval of multiqueue,
    7177 		 * which uses polling mode, is shorter than the default
    7178 		 * value. More tuning and AIM support are required.
   7179 		 */
   7180 		if (wm_is_using_multiqueue(sc))
   7181 			wmq->wmq_itr = 50;
   7182 		else
   7183 			wmq->wmq_itr = sc->sc_itr_init;
   7184 		wmq->wmq_set_itr = true;
   7185 
   7186 		mutex_enter(txq->txq_lock);
   7187 		wm_init_tx_queue(sc, wmq, txq);
   7188 		mutex_exit(txq->txq_lock);
   7189 
   7190 		mutex_enter(rxq->rxq_lock);
   7191 		error = wm_init_rx_queue(sc, wmq, rxq);
   7192 		mutex_exit(rxq->rxq_lock);
   7193 		if (error)
   7194 			break;
   7195 	}
   7196 
   7197 	return error;
   7198 }
   7199 
   7200 /*
   7201  * wm_tx_offload:
   7202  *
   7203  *	Set up TCP/IP checksumming parameters for the
   7204  *	specified packet.
   7205  */
   7206 static int
   7207 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7208     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7209 {
   7210 	struct mbuf *m0 = txs->txs_mbuf;
   7211 	struct livengood_tcpip_ctxdesc *t;
   7212 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7213 	uint32_t ipcse;
   7214 	struct ether_header *eh;
   7215 	int offset, iphl;
   7216 	uint8_t fields;
   7217 
   7218 	/*
   7219 	 * XXX It would be nice if the mbuf pkthdr had offset
   7220 	 * fields for the protocol headers.
   7221 	 */
   7222 
   7223 	eh = mtod(m0, struct ether_header *);
   7224 	switch (htons(eh->ether_type)) {
   7225 	case ETHERTYPE_IP:
   7226 	case ETHERTYPE_IPV6:
   7227 		offset = ETHER_HDR_LEN;
   7228 		break;
   7229 
   7230 	case ETHERTYPE_VLAN:
   7231 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7232 		break;
   7233 
   7234 	default:
   7235 		/* Don't support this protocol or encapsulation. */
   7236 		*fieldsp = 0;
   7237 		*cmdp = 0;
   7238 		return 0;
   7239 	}
   7240 
   7241 	if ((m0->m_pkthdr.csum_flags &
   7242 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7243 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7244 	} else
   7245 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7246 
   7247 	ipcse = offset + iphl - 1;
   7248 
   7249 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7250 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7251 	seg = 0;
   7252 	fields = 0;
   7253 
   7254 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7255 		int hlen = offset + iphl;
   7256 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7257 
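         		/*
         		 * For TSO, the chip regenerates the IP length field
         		 * and the TCP checksum for each segment it emits, so
         		 * the template headers must carry a zero length and a
         		 * TCP checksum seeded with the pseudo-header sum
         		 * (computed without the length); both branches below
         		 * arrange that.
         		 */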
   7258 		if (__predict_false(m0->m_len <
   7259 				    (hlen + sizeof(struct tcphdr)))) {
   7260 			/*
   7261 			 * TCP/IP headers are not in the first mbuf; we need
   7262 			 * to do this the slow and painful way. Let's just
   7263 			 * hope this doesn't happen very often.
   7264 			 */
   7265 			struct tcphdr th;
   7266 
   7267 			WM_Q_EVCNT_INCR(txq, tsopain);
   7268 
   7269 			m_copydata(m0, hlen, sizeof(th), &th);
   7270 			if (v4) {
   7271 				struct ip ip;
   7272 
   7273 				m_copydata(m0, offset, sizeof(ip), &ip);
   7274 				ip.ip_len = 0;
   7275 				m_copyback(m0,
   7276 				    offset + offsetof(struct ip, ip_len),
   7277 				    sizeof(ip.ip_len), &ip.ip_len);
   7278 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7279 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7280 			} else {
   7281 				struct ip6_hdr ip6;
   7282 
   7283 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7284 				ip6.ip6_plen = 0;
   7285 				m_copyback(m0,
   7286 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7287 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7288 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7289 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7290 			}
   7291 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7292 			    sizeof(th.th_sum), &th.th_sum);
   7293 
   7294 			hlen += th.th_off << 2;
   7295 		} else {
   7296 			/*
   7297 			 * TCP/IP headers are in the first mbuf; we can do
   7298 			 * this the easy way.
   7299 			 */
   7300 			struct tcphdr *th;
   7301 
   7302 			if (v4) {
   7303 				struct ip *ip =
   7304 				    (void *)(mtod(m0, char *) + offset);
   7305 				th = (void *)(mtod(m0, char *) + hlen);
   7306 
   7307 				ip->ip_len = 0;
   7308 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7309 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7310 			} else {
   7311 				struct ip6_hdr *ip6 =
   7312 				    (void *)(mtod(m0, char *) + offset);
   7313 				th = (void *)(mtod(m0, char *) + hlen);
   7314 
   7315 				ip6->ip6_plen = 0;
   7316 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7317 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7318 			}
   7319 			hlen += th->th_off << 2;
   7320 		}
   7321 
   7322 		if (v4) {
   7323 			WM_Q_EVCNT_INCR(txq, tso);
   7324 			cmdlen |= WTX_TCPIP_CMD_IP;
   7325 		} else {
   7326 			WM_Q_EVCNT_INCR(txq, tso6);
   7327 			ipcse = 0;
   7328 		}
   7329 		cmd |= WTX_TCPIP_CMD_TSE;
   7330 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7331 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7332 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7333 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7334 	}
   7335 
   7336 	/*
   7337 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7338 	 * offload feature, if we load the context descriptor, we
   7339 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7340 	 */
   7341 
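         	/*
         	 * Worked example: an untagged IPv4 frame with a 20-byte IP
         	 * header has offset = 14, so IPCSS = 14, IPCSO = 14 +
         	 * offsetof(struct ip, ip_sum) = 24 and IPCSE = 14 + 20 - 1 =
         	 * 33, i.e. the checksum window covers exactly the IP header.
         	 */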
   7342 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7343 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7344 	    WTX_TCPIP_IPCSE(ipcse);
   7345 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7346 		WM_Q_EVCNT_INCR(txq, ipsum);
   7347 		fields |= WTX_IXSM;
   7348 	}
   7349 
   7350 	offset += iphl;
   7351 
   7352 	if (m0->m_pkthdr.csum_flags &
   7353 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7354 		WM_Q_EVCNT_INCR(txq, tusum);
   7355 		fields |= WTX_TXSM;
   7356 		tucs = WTX_TCPIP_TUCSS(offset) |
   7357 		    WTX_TCPIP_TUCSO(offset +
   7358 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7359 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7360 	} else if ((m0->m_pkthdr.csum_flags &
   7361 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7362 		WM_Q_EVCNT_INCR(txq, tusum6);
   7363 		fields |= WTX_TXSM;
   7364 		tucs = WTX_TCPIP_TUCSS(offset) |
   7365 		    WTX_TCPIP_TUCSO(offset +
   7366 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7367 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7368 	} else {
   7369 		/* Just initialize it to a valid TCP context. */
   7370 		tucs = WTX_TCPIP_TUCSS(offset) |
   7371 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7372 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7373 	}
   7374 
   7375 	/*
    7376 	 * We don't have to write a context descriptor for every packet,
    7377 	 * except on the 82574: the 82574 requires a context descriptor
    7378 	 * for every packet when two descriptor queues are used.
    7379 	 * Writing a context descriptor for every packet adds some
    7380 	 * overhead, but it does not cause problems.
   7381 	 */
   7382 	/* Fill in the context descriptor. */
   7383 	t = (struct livengood_tcpip_ctxdesc *)
   7384 	    &txq->txq_descs[txq->txq_next];
   7385 	t->tcpip_ipcs = htole32(ipcs);
   7386 	t->tcpip_tucs = htole32(tucs);
   7387 	t->tcpip_cmdlen = htole32(cmdlen);
   7388 	t->tcpip_seg = htole32(seg);
   7389 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7390 
   7391 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7392 	txs->txs_ndesc++;
   7393 
   7394 	*cmdp = cmd;
   7395 	*fieldsp = fields;
   7396 
   7397 	return 0;
   7398 }
   7399 
   7400 static inline int
   7401 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7402 {
   7403 	struct wm_softc *sc = ifp->if_softc;
   7404 	u_int cpuid = cpu_index(curcpu());
   7405 
   7406 	/*
    7407 	 * Currently, a simple distribution strategy.
    7408 	 * TODO:
    7409 	 * Distribute by flowid (RSS hash value).
   7410 	 */
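         	/*
         	 * Worked example (hypothetical values): with ncpu = 8,
         	 * sc_affinity_offset = 2 and sc_nqueues = 4, a sender running
         	 * on cpuid 5 is mapped to ((5 + 8 - 2) % 8) % 4 = 3, i.e.
         	 * queue 3.
         	 */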
   7411 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7412 }
   7413 
   7414 /*
   7415  * wm_start:		[ifnet interface function]
   7416  *
   7417  *	Start packet transmission on the interface.
   7418  */
   7419 static void
   7420 wm_start(struct ifnet *ifp)
   7421 {
   7422 	struct wm_softc *sc = ifp->if_softc;
   7423 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7424 
   7425 #ifdef WM_MPSAFE
   7426 	KASSERT(if_is_mpsafe(ifp));
   7427 #endif
   7428 	/*
   7429 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7430 	 */
   7431 
   7432 	mutex_enter(txq->txq_lock);
   7433 	if (!txq->txq_stopping)
   7434 		wm_start_locked(ifp);
   7435 	mutex_exit(txq->txq_lock);
   7436 }
   7437 
   7438 static void
   7439 wm_start_locked(struct ifnet *ifp)
   7440 {
   7441 	struct wm_softc *sc = ifp->if_softc;
   7442 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7443 
   7444 	wm_send_common_locked(ifp, txq, false);
   7445 }
   7446 
   7447 static int
   7448 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7449 {
   7450 	int qid;
   7451 	struct wm_softc *sc = ifp->if_softc;
   7452 	struct wm_txqueue *txq;
   7453 
   7454 	qid = wm_select_txqueue(ifp, m);
   7455 	txq = &sc->sc_queue[qid].wmq_txq;
   7456 
   7457 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7458 		m_freem(m);
   7459 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7460 		return ENOBUFS;
   7461 	}
   7462 
   7463 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   7464 	ifp->if_obytes += m->m_pkthdr.len;
   7465 	if (m->m_flags & M_MCAST)
   7466 		ifp->if_omcasts++;
   7467 
   7468 	if (mutex_tryenter(txq->txq_lock)) {
   7469 		if (!txq->txq_stopping)
   7470 			wm_transmit_locked(ifp, txq);
   7471 		mutex_exit(txq->txq_lock);
   7472 	}
   7473 
   7474 	return 0;
   7475 }
   7476 
   7477 static void
   7478 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7479 {
   7480 
   7481 	wm_send_common_locked(ifp, txq, true);
   7482 }
   7483 
   7484 static void
   7485 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7486     bool is_transmit)
   7487 {
   7488 	struct wm_softc *sc = ifp->if_softc;
   7489 	struct mbuf *m0;
   7490 	struct wm_txsoft *txs;
   7491 	bus_dmamap_t dmamap;
   7492 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7493 	bus_addr_t curaddr;
   7494 	bus_size_t seglen, curlen;
   7495 	uint32_t cksumcmd;
   7496 	uint8_t cksumfields;
   7497 	bool remap = true;
   7498 
   7499 	KASSERT(mutex_owned(txq->txq_lock));
   7500 
   7501 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7502 		return;
   7503 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7504 		return;
   7505 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7506 		return;
   7507 
   7508 	/* Remember the previous number of free descriptors. */
   7509 	ofree = txq->txq_free;
   7510 
   7511 	/*
   7512 	 * Loop through the send queue, setting up transmit descriptors
   7513 	 * until we drain the queue, or use up all available transmit
   7514 	 * descriptors.
   7515 	 */
   7516 	for (;;) {
   7517 		m0 = NULL;
   7518 
   7519 		/* Get a work queue entry. */
   7520 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7521 			wm_txeof(txq, UINT_MAX);
   7522 			if (txq->txq_sfree == 0) {
   7523 				DPRINTF(WM_DEBUG_TX,
   7524 				    ("%s: TX: no free job descriptors\n",
   7525 					device_xname(sc->sc_dev)));
   7526 				WM_Q_EVCNT_INCR(txq, txsstall);
   7527 				break;
   7528 			}
   7529 		}
   7530 
   7531 		/* Grab a packet off the queue. */
   7532 		if (is_transmit)
   7533 			m0 = pcq_get(txq->txq_interq);
   7534 		else
   7535 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7536 		if (m0 == NULL)
   7537 			break;
   7538 
   7539 		DPRINTF(WM_DEBUG_TX,
   7540 		    ("%s: TX: have packet to transmit: %p\n",
   7541 			device_xname(sc->sc_dev), m0));
   7542 
   7543 		txs = &txq->txq_soft[txq->txq_snext];
   7544 		dmamap = txs->txs_dmamap;
   7545 
   7546 		use_tso = (m0->m_pkthdr.csum_flags &
   7547 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7548 
   7549 		/*
   7550 		 * So says the Linux driver:
   7551 		 * The controller does a simple calculation to make sure
   7552 		 * there is enough room in the FIFO before initiating the
   7553 		 * DMA for each buffer. The calc is:
   7554 		 *	4 = ceil(buffer len / MSS)
   7555 		 * To make sure we don't overrun the FIFO, adjust the max
   7556 		 * buffer len if the MSS drops.
   7557 		 */
   7558 		dmamap->dm_maxsegsz =
   7559 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7560 		    ? m0->m_pkthdr.segsz << 2
   7561 		    : WTX_MAX_LEN;
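         		/*
         		 * E.g. (hypothetical MSS): with segsz = 1460, each
         		 * DMA segment is capped at 1460 << 2 = 5840 bytes,
         		 * keeping the chip's ceil(buffer len / MSS) estimate
         		 * at 4 or less.
         		 */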
   7562 
   7563 		/*
   7564 		 * Load the DMA map.  If this fails, the packet either
   7565 		 * didn't fit in the allotted number of segments, or we
   7566 		 * were short on resources.  For the too-many-segments
   7567 		 * case, we simply report an error and drop the packet,
   7568 		 * since we can't sanely copy a jumbo packet to a single
   7569 		 * buffer.
   7570 		 */
   7571 retry:
   7572 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7573 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7574 		if (__predict_false(error)) {
   7575 			if (error == EFBIG) {
   7576 				if (remap == true) {
   7577 					struct mbuf *m;
   7578 
   7579 					remap = false;
   7580 					m = m_defrag(m0, M_NOWAIT);
   7581 					if (m != NULL) {
   7582 						WM_Q_EVCNT_INCR(txq, defrag);
   7583 						m0 = m;
   7584 						goto retry;
   7585 					}
   7586 				}
   7587 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7588 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7589 				    "DMA segments, dropping...\n",
   7590 				    device_xname(sc->sc_dev));
   7591 				wm_dump_mbuf_chain(sc, m0);
   7592 				m_freem(m0);
   7593 				continue;
   7594 			}
   7595 			/* Short on resources, just stop for now. */
   7596 			DPRINTF(WM_DEBUG_TX,
   7597 			    ("%s: TX: dmamap load failed: %d\n",
   7598 				device_xname(sc->sc_dev), error));
   7599 			break;
   7600 		}
   7601 
   7602 		segs_needed = dmamap->dm_nsegs;
   7603 		if (use_tso) {
   7604 			/* For sentinel descriptor; see below. */
   7605 			segs_needed++;
   7606 		}
   7607 
   7608 		/*
   7609 		 * Ensure we have enough descriptors free to describe
   7610 		 * the packet. Note, we always reserve one descriptor
   7611 		 * at the end of the ring due to the semantics of the
   7612 		 * TDT register, plus one more in the event we need
   7613 		 * to load offload context.
   7614 		 */
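         		/*
         		 * E.g. (hypothetical): a TSO packet mapped to 6 DMA
         		 * segments has segs_needed = 7 (sentinel included);
         		 * with txq_free = 8, 7 > 8 - 2 holds, so we punt until
         		 * wm_txeof() reclaims descriptors.
         		 */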
   7615 		if (segs_needed > txq->txq_free - 2) {
   7616 			/*
   7617 			 * Not enough free descriptors to transmit this
   7618 			 * packet.  We haven't committed anything yet,
   7619 			 * so just unload the DMA map, put the packet
   7620 			 * pack on the queue, and punt. Notify the upper
    7621 			 * back on the queue, and punt. Notify the upper
   7622 			 */
   7623 			DPRINTF(WM_DEBUG_TX,
   7624 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7625 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7626 				segs_needed, txq->txq_free - 1));
   7627 			if (!is_transmit)
   7628 				ifp->if_flags |= IFF_OACTIVE;
   7629 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7630 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7631 			WM_Q_EVCNT_INCR(txq, txdstall);
   7632 			break;
   7633 		}
   7634 
   7635 		/*
   7636 		 * Check for 82547 Tx FIFO bug. We need to do this
   7637 		 * once we know we can transmit the packet, since we
   7638 		 * do some internal FIFO space accounting here.
   7639 		 */
   7640 		if (sc->sc_type == WM_T_82547 &&
   7641 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7642 			DPRINTF(WM_DEBUG_TX,
   7643 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7644 				device_xname(sc->sc_dev)));
   7645 			if (!is_transmit)
   7646 				ifp->if_flags |= IFF_OACTIVE;
   7647 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7648 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7649 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7650 			break;
   7651 		}
   7652 
   7653 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7654 
   7655 		DPRINTF(WM_DEBUG_TX,
   7656 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7657 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7658 
   7659 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7660 
   7661 		/*
   7662 		 * Store a pointer to the packet so that we can free it
   7663 		 * later.
   7664 		 *
   7665 		 * Initially, we consider the number of descriptors the
    7666 		 * packet uses to be the number of DMA segments.  This may be
   7667 		 * incremented by 1 if we do checksum offload (a descriptor
   7668 		 * is used to set the checksum context).
   7669 		 */
   7670 		txs->txs_mbuf = m0;
   7671 		txs->txs_firstdesc = txq->txq_next;
   7672 		txs->txs_ndesc = segs_needed;
   7673 
   7674 		/* Set up offload parameters for this packet. */
   7675 		if (m0->m_pkthdr.csum_flags &
   7676 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7677 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7678 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7679 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7680 					  &cksumfields) != 0) {
   7681 				/* Error message already displayed. */
   7682 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7683 				continue;
   7684 			}
   7685 		} else {
   7686 			cksumcmd = 0;
   7687 			cksumfields = 0;
   7688 		}
   7689 
   7690 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7691 
   7692 		/* Sync the DMA map. */
   7693 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7694 		    BUS_DMASYNC_PREWRITE);
   7695 
   7696 		/* Initialize the transmit descriptor. */
   7697 		for (nexttx = txq->txq_next, seg = 0;
   7698 		     seg < dmamap->dm_nsegs; seg++) {
   7699 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7700 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7701 			     seglen != 0;
   7702 			     curaddr += curlen, seglen -= curlen,
   7703 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7704 				curlen = seglen;
   7705 
   7706 				/*
   7707 				 * So says the Linux driver:
   7708 				 * Work around for premature descriptor
   7709 				 * write-backs in TSO mode.  Append a
   7710 				 * 4-byte sentinel descriptor.
   7711 				 */
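         				/*
         				 * Shortening the final chunk by 4
         				 * bytes leaves seglen = 4 after this
         				 * pass, so the loop emits one more
         				 * 4-byte descriptor: the sentinel.
         				 */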
   7712 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7713 				    curlen > 8)
   7714 					curlen -= 4;
   7715 
   7716 				wm_set_dma_addr(
   7717 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7718 				txq->txq_descs[nexttx].wtx_cmdlen
   7719 				    = htole32(cksumcmd | curlen);
   7720 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7721 				    = 0;
   7722 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7723 				    = cksumfields;
    7724 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7725 				lasttx = nexttx;
   7726 
   7727 				DPRINTF(WM_DEBUG_TX,
   7728 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7729 					"len %#04zx\n",
   7730 					device_xname(sc->sc_dev), nexttx,
   7731 					(uint64_t)curaddr, curlen));
   7732 			}
   7733 		}
   7734 
   7735 		KASSERT(lasttx != -1);
   7736 
   7737 		/*
   7738 		 * Set up the command byte on the last descriptor of
   7739 		 * the packet. If we're in the interrupt delay window,
   7740 		 * delay the interrupt.
   7741 		 */
   7742 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7743 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7744 
   7745 		/*
   7746 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7747 		 * up the descriptor to encapsulate the packet for us.
   7748 		 *
   7749 		 * This is only valid on the last descriptor of the packet.
   7750 		 */
   7751 		if (vlan_has_tag(m0)) {
   7752 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7753 			    htole32(WTX_CMD_VLE);
   7754 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7755 			    = htole16(vlan_get_tag(m0));
   7756 		}
   7757 
   7758 		txs->txs_lastdesc = lasttx;
   7759 
   7760 		DPRINTF(WM_DEBUG_TX,
   7761 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7762 			device_xname(sc->sc_dev),
   7763 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7764 
   7765 		/* Sync the descriptors we're using. */
   7766 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7767 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7768 
   7769 		/* Give the packet to the chip. */
   7770 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7771 
   7772 		DPRINTF(WM_DEBUG_TX,
   7773 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7774 
   7775 		DPRINTF(WM_DEBUG_TX,
   7776 		    ("%s: TX: finished transmitting packet, job %d\n",
   7777 			device_xname(sc->sc_dev), txq->txq_snext));
   7778 
   7779 		/* Advance the tx pointer. */
   7780 		txq->txq_free -= txs->txs_ndesc;
   7781 		txq->txq_next = nexttx;
   7782 
   7783 		txq->txq_sfree--;
   7784 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7785 
   7786 		/* Pass the packet to any BPF listeners. */
   7787 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7788 	}
   7789 
   7790 	if (m0 != NULL) {
   7791 		if (!is_transmit)
   7792 			ifp->if_flags |= IFF_OACTIVE;
   7793 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7794 		WM_Q_EVCNT_INCR(txq, descdrop);
   7795 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7796 			__func__));
   7797 		m_freem(m0);
   7798 	}
   7799 
   7800 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7801 		/* No more slots; notify upper layer. */
   7802 		if (!is_transmit)
   7803 			ifp->if_flags |= IFF_OACTIVE;
   7804 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7805 	}
   7806 
   7807 	if (txq->txq_free != ofree) {
   7808 		/* Set a watchdog timer in case the chip flakes out. */
   7809 		txq->txq_lastsent = time_uptime;
   7810 		txq->txq_sending = true;
   7811 	}
   7812 }
   7813 
   7814 /*
   7815  * wm_nq_tx_offload:
   7816  *
   7817  *	Set up TCP/IP checksumming parameters for the
   7818  *	specified packet, for NEWQUEUE devices
   7819  */
   7820 static int
   7821 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7822     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7823 {
   7824 	struct mbuf *m0 = txs->txs_mbuf;
   7825 	uint32_t vl_len, mssidx, cmdc;
   7826 	struct ether_header *eh;
   7827 	int offset, iphl;
   7828 
   7829 	/*
   7830 	 * XXX It would be nice if the mbuf pkthdr had offset
   7831 	 * fields for the protocol headers.
   7832 	 */
   7833 	*cmdlenp = 0;
   7834 	*fieldsp = 0;
   7835 
   7836 	eh = mtod(m0, struct ether_header *);
   7837 	switch (htons(eh->ether_type)) {
   7838 	case ETHERTYPE_IP:
   7839 	case ETHERTYPE_IPV6:
   7840 		offset = ETHER_HDR_LEN;
   7841 		break;
   7842 
   7843 	case ETHERTYPE_VLAN:
   7844 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7845 		break;
   7846 
   7847 	default:
   7848 		/* Don't support this protocol or encapsulation. */
   7849 		*do_csum = false;
   7850 		return 0;
   7851 	}
   7852 	*do_csum = true;
   7853 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7854 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7855 
   7856 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7857 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7858 
   7859 	if ((m0->m_pkthdr.csum_flags &
   7860 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7861 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7862 	} else {
   7863 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7864 	}
   7865 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7866 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7867 
   7868 	if (vlan_has_tag(m0)) {
   7869 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7870 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7871 		*cmdlenp |= NQTX_CMD_VLE;
   7872 	}
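	/*
	 * At this point vl_len holds the packed header-length info for the
	 * context descriptor. A worked example (a sketch; the exact shift
	 * and mask values are in if_wmreg.h): an untagged IPv4/TCP frame
	 * has a 14 byte MAC header and typically a 20 byte IP header,
	 * giving vl_len == (14 << MACLEN_SHIFT) | (20 << IPLEN_SHIFT),
	 * with the VLAN field left zero.
	 */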
   7873 
   7874 	mssidx = 0;
   7875 
   7876 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7877 		int hlen = offset + iphl;
   7878 		int tcp_hlen;
   7879 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7880 
   7881 		if (__predict_false(m0->m_len <
   7882 				    (hlen + sizeof(struct tcphdr)))) {
   7883 			/*
   7884 			 * TCP/IP headers are not in the first mbuf; we need
   7885 			 * to do this the slow and painful way. Let's just
   7886 			 * hope this doesn't happen very often.
   7887 			 */
   7888 			struct tcphdr th;
   7889 
   7890 			WM_Q_EVCNT_INCR(txq, tsopain);
   7891 
   7892 			m_copydata(m0, hlen, sizeof(th), &th);
   7893 			if (v4) {
   7894 				struct ip ip;
   7895 
   7896 				m_copydata(m0, offset, sizeof(ip), &ip);
   7897 				ip.ip_len = 0;
   7898 				m_copyback(m0,
   7899 				    offset + offsetof(struct ip, ip_len),
   7900 				    sizeof(ip.ip_len), &ip.ip_len);
   7901 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7902 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7903 			} else {
   7904 				struct ip6_hdr ip6;
   7905 
   7906 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7907 				ip6.ip6_plen = 0;
   7908 				m_copyback(m0,
   7909 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7910 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7911 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7912 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7913 			}
   7914 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7915 			    sizeof(th.th_sum), &th.th_sum);
   7916 
   7917 			tcp_hlen = th.th_off << 2;
   7918 		} else {
   7919 			/*
   7920 			 * TCP/IP headers are in the first mbuf; we can do
   7921 			 * this the easy way.
   7922 			 */
   7923 			struct tcphdr *th;
   7924 
   7925 			if (v4) {
   7926 				struct ip *ip =
   7927 				    (void *)(mtod(m0, char *) + offset);
   7928 				th = (void *)(mtod(m0, char *) + hlen);
   7929 
   7930 				ip->ip_len = 0;
   7931 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7932 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7933 			} else {
   7934 				struct ip6_hdr *ip6 =
   7935 				    (void *)(mtod(m0, char *) + offset);
   7936 				th = (void *)(mtod(m0, char *) + hlen);
   7937 
   7938 				ip6->ip6_plen = 0;
   7939 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7940 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7941 			}
   7942 			tcp_hlen = th->th_off << 2;
   7943 		}
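		/*
		 * In both paths above, the IP total-length field is zeroed
		 * and th_sum is seeded with a pseudo-header checksum that
		 * excludes the length: with TSO the hardware splits the
		 * payload into MSS-sized segments and fills in the
		 * per-segment length fields itself. in_cksum_phdr() and
		 * in6_cksum_phdr() compute exactly that partial sum from
		 * the addresses and the protocol number.
		 */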
   7944 		hlen += tcp_hlen;
   7945 		*cmdlenp |= NQTX_CMD_TSE;
   7946 
   7947 		if (v4) {
   7948 			WM_Q_EVCNT_INCR(txq, tso);
   7949 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7950 		} else {
   7951 			WM_Q_EVCNT_INCR(txq, tso6);
   7952 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7953 		}
   7954 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7955 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7956 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7957 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7958 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7959 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7960 	} else {
   7961 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7962 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7963 	}
   7964 
   7965 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7966 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7967 		cmdc |= NQTXC_CMD_IP4;
   7968 	}
   7969 
   7970 	if (m0->m_pkthdr.csum_flags &
   7971 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7972 		WM_Q_EVCNT_INCR(txq, tusum);
   7973 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7974 			cmdc |= NQTXC_CMD_TCP;
   7975 		else
   7976 			cmdc |= NQTXC_CMD_UDP;
   7977 
   7978 		cmdc |= NQTXC_CMD_IP4;
   7979 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7980 	}
   7981 	if (m0->m_pkthdr.csum_flags &
   7982 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7983 		WM_Q_EVCNT_INCR(txq, tusum6);
   7984 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7985 			cmdc |= NQTXC_CMD_TCP;
   7986 		else
   7987 			cmdc |= NQTXC_CMD_UDP;
   7988 
   7989 		cmdc |= NQTXC_CMD_IP6;
   7990 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7991 	}
   7992 
   7993 	/*
    7994 	 * We don't have to write a context descriptor for every packet on
    7995 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7996 	 * I210 and I211. For these controllers it is enough to write one
    7997 	 * context descriptor per Tx queue.
    7998 	 * Writing a context descriptor for every packet adds overhead, but
    7999 	 * it does not cause problems.
   8000 	 */
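	/*
	 * For example (a sketch), a plain IPv4/TCP checksum-offload packet
	 * ends up with cmdc == NQTX_DTYP_C | NQTX_CMD_DEXT | NQTXC_CMD_IP4 |
	 * NQTXC_CMD_TCP, vl_len describing the MAC and IP header lengths,
	 * and mssidx == 0 since TSE is not used.
	 */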
   8001 	/* Fill in the context descriptor. */
   8002 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8003 	    htole32(vl_len);
   8004 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8005 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8006 	    htole32(cmdc);
   8007 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8008 	    htole32(mssidx);
   8009 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8010 	DPRINTF(WM_DEBUG_TX,
   8011 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8012 		txq->txq_next, 0, vl_len));
   8013 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8014 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8015 	txs->txs_ndesc++;
   8016 	return 0;
   8017 }
   8018 
   8019 /*
   8020  * wm_nq_start:		[ifnet interface function]
   8021  *
   8022  *	Start packet transmission on the interface for NEWQUEUE devices
   8023  */
   8024 static void
   8025 wm_nq_start(struct ifnet *ifp)
   8026 {
   8027 	struct wm_softc *sc = ifp->if_softc;
   8028 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8029 
   8030 #ifdef WM_MPSAFE
   8031 	KASSERT(if_is_mpsafe(ifp));
   8032 #endif
   8033 	/*
   8034 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8035 	 */
   8036 
   8037 	mutex_enter(txq->txq_lock);
   8038 	if (!txq->txq_stopping)
   8039 		wm_nq_start_locked(ifp);
   8040 	mutex_exit(txq->txq_lock);
   8041 }
   8042 
   8043 static void
   8044 wm_nq_start_locked(struct ifnet *ifp)
   8045 {
   8046 	struct wm_softc *sc = ifp->if_softc;
   8047 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8048 
   8049 	wm_nq_send_common_locked(ifp, txq, false);
   8050 }
   8051 
   8052 static int
   8053 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8054 {
   8055 	int qid;
   8056 	struct wm_softc *sc = ifp->if_softc;
   8057 	struct wm_txqueue *txq;
   8058 
   8059 	qid = wm_select_txqueue(ifp, m);
   8060 	txq = &sc->sc_queue[qid].wmq_txq;
   8061 
   8062 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8063 		m_freem(m);
   8064 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8065 		return ENOBUFS;
   8066 	}
   8067 
   8068 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   8069 	ifp->if_obytes += m->m_pkthdr.len;
   8070 	if (m->m_flags & M_MCAST)
   8071 		ifp->if_omcasts++;
   8072 
   8073 	/*
    8074 	 * There are two situations in which this mutex_tryenter() can
    8075 	 * fail at run time:
    8076 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8077 	 *     (2) contention with the deferred if_start softint
    8078 	 *         (wm_handle_queue())
    8079 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8080 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8081 	 * In case (2), the last packet enqueued is also dequeued by
    8082 	 * wm_deferred_start_locked(), so it does not get stuck, either.
   8083 	 */
   8084 	if (mutex_tryenter(txq->txq_lock)) {
   8085 		if (!txq->txq_stopping)
   8086 			wm_nq_transmit_locked(ifp, txq);
   8087 		mutex_exit(txq->txq_lock);
   8088 	}
   8089 
   8090 	return 0;
   8091 }
   8092 
   8093 static void
   8094 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8095 {
   8096 
   8097 	wm_nq_send_common_locked(ifp, txq, true);
   8098 }
   8099 
   8100 static void
   8101 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8102     bool is_transmit)
   8103 {
   8104 	struct wm_softc *sc = ifp->if_softc;
   8105 	struct mbuf *m0;
   8106 	struct wm_txsoft *txs;
   8107 	bus_dmamap_t dmamap;
   8108 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8109 	bool do_csum, sent;
   8110 	bool remap = true;
   8111 
   8112 	KASSERT(mutex_owned(txq->txq_lock));
   8113 
   8114 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8115 		return;
   8116 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8117 		return;
   8118 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8119 		return;
   8120 
   8121 	sent = false;
   8122 
   8123 	/*
   8124 	 * Loop through the send queue, setting up transmit descriptors
   8125 	 * until we drain the queue, or use up all available transmit
   8126 	 * descriptors.
   8127 	 */
   8128 	for (;;) {
   8129 		m0 = NULL;
   8130 
   8131 		/* Get a work queue entry. */
   8132 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8133 			wm_txeof(txq, UINT_MAX);
   8134 			if (txq->txq_sfree == 0) {
   8135 				DPRINTF(WM_DEBUG_TX,
   8136 				    ("%s: TX: no free job descriptors\n",
   8137 					device_xname(sc->sc_dev)));
   8138 				WM_Q_EVCNT_INCR(txq, txsstall);
   8139 				break;
   8140 			}
   8141 		}
   8142 
   8143 		/* Grab a packet off the queue. */
   8144 		if (is_transmit)
   8145 			m0 = pcq_get(txq->txq_interq);
   8146 		else
   8147 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8148 		if (m0 == NULL)
   8149 			break;
   8150 
   8151 		DPRINTF(WM_DEBUG_TX,
   8152 		    ("%s: TX: have packet to transmit: %p\n",
   8153 		    device_xname(sc->sc_dev), m0));
   8154 
   8155 		txs = &txq->txq_soft[txq->txq_snext];
   8156 		dmamap = txs->txs_dmamap;
   8157 
   8158 		/*
   8159 		 * Load the DMA map.  If this fails, the packet either
   8160 		 * didn't fit in the allotted number of segments, or we
   8161 		 * were short on resources.  For the too-many-segments
   8162 		 * case, we simply report an error and drop the packet,
   8163 		 * since we can't sanely copy a jumbo packet to a single
   8164 		 * buffer.
   8165 		 */
   8166 retry:
   8167 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8168 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8169 		if (__predict_false(error)) {
   8170 			if (error == EFBIG) {
   8171 				if (remap == true) {
   8172 					struct mbuf *m;
   8173 
   8174 					remap = false;
   8175 					m = m_defrag(m0, M_NOWAIT);
   8176 					if (m != NULL) {
   8177 						WM_Q_EVCNT_INCR(txq, defrag);
   8178 						m0 = m;
   8179 						goto retry;
   8180 					}
   8181 				}
   8182 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8183 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8184 				    "DMA segments, dropping...\n",
   8185 				    device_xname(sc->sc_dev));
   8186 				wm_dump_mbuf_chain(sc, m0);
   8187 				m_freem(m0);
   8188 				continue;
   8189 			}
   8190 			/* Short on resources, just stop for now. */
   8191 			DPRINTF(WM_DEBUG_TX,
   8192 			    ("%s: TX: dmamap load failed: %d\n",
   8193 				device_xname(sc->sc_dev), error));
   8194 			break;
   8195 		}
   8196 
   8197 		segs_needed = dmamap->dm_nsegs;
   8198 
   8199 		/*
   8200 		 * Ensure we have enough descriptors free to describe
   8201 		 * the packet. Note, we always reserve one descriptor
   8202 		 * at the end of the ring due to the semantics of the
   8203 		 * TDT register, plus one more in the event we need
   8204 		 * to load offload context.
   8205 		 */
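		/*
		 * For example (a sketch), with txq_free == 8 a packet mapped
		 * to 7 DMA segments is rejected here (7 > 8 - 2): one
		 * descriptor is held back for the TDT semantics and one for
		 * a possible context descriptor.
		 */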
   8206 		if (segs_needed > txq->txq_free - 2) {
   8207 			/*
   8208 			 * Not enough free descriptors to transmit this
   8209 			 * packet.  We haven't committed anything yet,
   8210 			 * so just unload the DMA map, put the packet
    8211 			 * back on the queue, and punt. Notify the upper
   8212 			 * layer that there are no more slots left.
   8213 			 */
   8214 			DPRINTF(WM_DEBUG_TX,
   8215 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8216 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8217 				segs_needed, txq->txq_free - 1));
   8218 			if (!is_transmit)
   8219 				ifp->if_flags |= IFF_OACTIVE;
   8220 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8221 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8222 			WM_Q_EVCNT_INCR(txq, txdstall);
   8223 			break;
   8224 		}
   8225 
   8226 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8227 
   8228 		DPRINTF(WM_DEBUG_TX,
   8229 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8230 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8231 
   8232 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8233 
   8234 		/*
   8235 		 * Store a pointer to the packet so that we can free it
   8236 		 * later.
   8237 		 *
    8238 		 * Initially, we take the number of descriptors the
    8239 		 * packet uses to be the number of DMA segments.  This may be
   8240 		 * incremented by 1 if we do checksum offload (a descriptor
   8241 		 * is used to set the checksum context).
   8242 		 */
   8243 		txs->txs_mbuf = m0;
   8244 		txs->txs_firstdesc = txq->txq_next;
   8245 		txs->txs_ndesc = segs_needed;
   8246 
   8247 		/* Set up offload parameters for this packet. */
   8248 		uint32_t cmdlen, fields, dcmdlen;
   8249 		if (m0->m_pkthdr.csum_flags &
   8250 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8251 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8252 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8253 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8254 			    &do_csum) != 0) {
   8255 				/* Error message already displayed. */
   8256 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8257 				continue;
   8258 			}
   8259 		} else {
   8260 			do_csum = false;
   8261 			cmdlen = 0;
   8262 			fields = 0;
   8263 		}
   8264 
   8265 		/* Sync the DMA map. */
   8266 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8267 		    BUS_DMASYNC_PREWRITE);
   8268 
   8269 		/* Initialize the first transmit descriptor. */
   8270 		nexttx = txq->txq_next;
   8271 		if (!do_csum) {
   8272 			/* Setup a legacy descriptor */
   8273 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8274 			    dmamap->dm_segs[0].ds_addr);
   8275 			txq->txq_descs[nexttx].wtx_cmdlen =
   8276 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8277 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8278 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8279 			if (vlan_has_tag(m0)) {
   8280 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8281 				    htole32(WTX_CMD_VLE);
   8282 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8283 				    htole16(vlan_get_tag(m0));
   8284 			} else
    8285 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8286 
   8287 			dcmdlen = 0;
   8288 		} else {
   8289 			/* Setup an advanced data descriptor */
   8290 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8291 			    htole64(dmamap->dm_segs[0].ds_addr);
   8292 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8293 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8294 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8295 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8296 			    htole32(fields);
   8297 			DPRINTF(WM_DEBUG_TX,
   8298 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8299 				device_xname(sc->sc_dev), nexttx,
   8300 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8301 			DPRINTF(WM_DEBUG_TX,
   8302 			    ("\t 0x%08x%08x\n", fields,
   8303 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8304 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8305 		}
   8306 
   8307 		lasttx = nexttx;
   8308 		nexttx = WM_NEXTTX(txq, nexttx);
   8309 		/*
    8310 		 * Fill in the next descriptors. The legacy and advanced
    8311 		 * formats are the same here.
   8312 		 */
   8313 		for (seg = 1; seg < dmamap->dm_nsegs;
   8314 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8315 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8316 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8317 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8318 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8319 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8320 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8321 			lasttx = nexttx;
   8322 
   8323 			DPRINTF(WM_DEBUG_TX,
   8324 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8325 				device_xname(sc->sc_dev), nexttx,
   8326 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8327 				dmamap->dm_segs[seg].ds_len));
   8328 		}
   8329 
   8330 		KASSERT(lasttx != -1);
   8331 
   8332 		/*
   8333 		 * Set up the command byte on the last descriptor of
   8334 		 * the packet. If we're in the interrupt delay window,
   8335 		 * delay the interrupt.
   8336 		 */
   8337 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8338 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8339 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8340 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8341 
   8342 		txs->txs_lastdesc = lasttx;
   8343 
   8344 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8345 		    device_xname(sc->sc_dev),
   8346 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8347 
   8348 		/* Sync the descriptors we're using. */
   8349 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8350 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8351 
   8352 		/* Give the packet to the chip. */
   8353 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8354 		sent = true;
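		/*
		 * Writing TDT above advances the tail pointer to one past
		 * the last descriptor we filled in, handing ownership of
		 * the new descriptors to the hardware: the device processes
		 * descriptors from head up to, but not including, tail.
		 */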
   8355 
   8356 		DPRINTF(WM_DEBUG_TX,
   8357 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8358 
   8359 		DPRINTF(WM_DEBUG_TX,
   8360 		    ("%s: TX: finished transmitting packet, job %d\n",
   8361 			device_xname(sc->sc_dev), txq->txq_snext));
   8362 
   8363 		/* Advance the tx pointer. */
   8364 		txq->txq_free -= txs->txs_ndesc;
   8365 		txq->txq_next = nexttx;
   8366 
   8367 		txq->txq_sfree--;
   8368 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8369 
   8370 		/* Pass the packet to any BPF listeners. */
   8371 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8372 	}
   8373 
   8374 	if (m0 != NULL) {
   8375 		if (!is_transmit)
   8376 			ifp->if_flags |= IFF_OACTIVE;
   8377 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8378 		WM_Q_EVCNT_INCR(txq, descdrop);
   8379 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8380 			__func__));
   8381 		m_freem(m0);
   8382 	}
   8383 
   8384 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8385 		/* No more slots; notify upper layer. */
   8386 		if (!is_transmit)
   8387 			ifp->if_flags |= IFF_OACTIVE;
   8388 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8389 	}
   8390 
   8391 	if (sent) {
   8392 		/* Set a watchdog timer in case the chip flakes out. */
   8393 		txq->txq_lastsent = time_uptime;
   8394 		txq->txq_sending = true;
   8395 	}
   8396 }
   8397 
   8398 static void
   8399 wm_deferred_start_locked(struct wm_txqueue *txq)
   8400 {
   8401 	struct wm_softc *sc = txq->txq_sc;
   8402 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8403 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8404 	int qid = wmq->wmq_id;
   8405 
   8406 	KASSERT(mutex_owned(txq->txq_lock));
   8407 
   8408 	if (txq->txq_stopping) {
   8409 		mutex_exit(txq->txq_lock);
   8410 		return;
   8411 	}
   8412 
   8413 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8414 		/* XXX needed for ALTQ or single-CPU systems */
   8415 		if (qid == 0)
   8416 			wm_nq_start_locked(ifp);
   8417 		wm_nq_transmit_locked(ifp, txq);
   8418 	} else {
    8419 		/* XXX needed for ALTQ or single-CPU systems */
   8420 		if (qid == 0)
   8421 			wm_start_locked(ifp);
   8422 		wm_transmit_locked(ifp, txq);
   8423 	}
   8424 }
   8425 
   8426 /* Interrupt */
   8427 
   8428 /*
   8429  * wm_txeof:
   8430  *
   8431  *	Helper; handle transmit interrupts.
   8432  */
   8433 static bool
   8434 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8435 {
   8436 	struct wm_softc *sc = txq->txq_sc;
   8437 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8438 	struct wm_txsoft *txs;
   8439 	int count = 0;
   8440 	int i;
   8441 	uint8_t status;
   8442 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8443 	bool more = false;
   8444 
   8445 	KASSERT(mutex_owned(txq->txq_lock));
   8446 
   8447 	if (txq->txq_stopping)
   8448 		return false;
   8449 
   8450 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8451 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8452 	if (wmq->wmq_id == 0)
   8453 		ifp->if_flags &= ~IFF_OACTIVE;
   8454 
   8455 	/*
   8456 	 * Go through the Tx list and free mbufs for those
   8457 	 * frames which have been transmitted.
   8458 	 */
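	/*
	 * Completion is detected via the DD (Descriptor Done) bit the
	 * hardware writes back into the status field of each job's last
	 * descriptor. Jobs are reclaimed strictly in order, so we stop at
	 * the first job whose DD bit is still clear.
	 */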
   8459 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8460 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8461 		if (limit-- == 0) {
   8462 			more = true;
   8463 			DPRINTF(WM_DEBUG_TX,
   8464 			    ("%s: TX: loop limited, job %d is not processed\n",
   8465 				device_xname(sc->sc_dev), i));
   8466 			break;
   8467 		}
   8468 
   8469 		txs = &txq->txq_soft[i];
   8470 
   8471 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8472 			device_xname(sc->sc_dev), i));
   8473 
   8474 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8475 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8476 
   8477 		status =
   8478 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8479 		if ((status & WTX_ST_DD) == 0) {
   8480 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8481 			    BUS_DMASYNC_PREREAD);
   8482 			break;
   8483 		}
   8484 
   8485 		count++;
   8486 		DPRINTF(WM_DEBUG_TX,
   8487 		    ("%s: TX: job %d done: descs %d..%d\n",
   8488 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8489 		    txs->txs_lastdesc));
   8490 
   8491 		/*
   8492 		 * XXX We should probably be using the statistics
   8493 		 * XXX registers, but I don't know if they exist
   8494 		 * XXX on chips before the i82544.
   8495 		 */
   8496 
   8497 #ifdef WM_EVENT_COUNTERS
   8498 		if (status & WTX_ST_TU)
   8499 			WM_Q_EVCNT_INCR(txq, underrun);
   8500 #endif /* WM_EVENT_COUNTERS */
   8501 
   8502 		/*
    8503 		 * The documents for 82574 and newer say the status field has
    8504 		 * neither an EC (Excessive Collision) bit nor an LC (Late
    8505 		 * Collision) bit (both are reserved). Refer to the "PCIe GbE
    8506 		 * Controller Open Source Software Developer's Manual", the
    8507 		 * 82574 datasheet and newer ones.
    8508 		 *
    8509 		 * XXX The LC bit has been seen set on I218 even on full-duplex
    8510 		 * media, so it might have some other, undocumented meaning.
   8511 		 */
   8512 
   8513 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8514 		    && ((sc->sc_type < WM_T_82574)
   8515 			|| (sc->sc_type == WM_T_80003))) {
   8516 			ifp->if_oerrors++;
   8517 			if (status & WTX_ST_LC)
   8518 				log(LOG_WARNING, "%s: late collision\n",
   8519 				    device_xname(sc->sc_dev));
   8520 			else if (status & WTX_ST_EC) {
   8521 				ifp->if_collisions +=
   8522 				    TX_COLLISION_THRESHOLD + 1;
   8523 				log(LOG_WARNING, "%s: excessive collisions\n",
   8524 				    device_xname(sc->sc_dev));
   8525 			}
   8526 		} else
   8527 			ifp->if_opackets++;
   8528 
   8529 		txq->txq_packets++;
   8530 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8531 
   8532 		txq->txq_free += txs->txs_ndesc;
   8533 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8534 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8535 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8536 		m_freem(txs->txs_mbuf);
   8537 		txs->txs_mbuf = NULL;
   8538 	}
   8539 
   8540 	/* Update the dirty transmit buffer pointer. */
   8541 	txq->txq_sdirty = i;
   8542 	DPRINTF(WM_DEBUG_TX,
   8543 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8544 
   8545 	/*
   8546 	 * If there are no more pending transmissions, cancel the watchdog
   8547 	 * timer.
   8548 	 */
   8549 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8550 		txq->txq_sending = false;
   8551 
   8552 	return more;
   8553 }
   8554 
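/*
 * The wm_rxdesc_get_*() helpers below hide the three Rx descriptor
 * layouts this driver may be using: the legacy format (wrx_*), the
 * 82574 extended format (erx_*) and the NEWQUEUE advanced format
 * (nqrx_*). Each helper dispatches on sc_type/sc_flags so that the
 * rest of the Rx path can stay format-agnostic.
 */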
   8555 static inline uint32_t
   8556 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8557 {
   8558 	struct wm_softc *sc = rxq->rxq_sc;
   8559 
   8560 	if (sc->sc_type == WM_T_82574)
   8561 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8562 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8563 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8564 	else
   8565 		return rxq->rxq_descs[idx].wrx_status;
   8566 }
   8567 
   8568 static inline uint32_t
   8569 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8570 {
   8571 	struct wm_softc *sc = rxq->rxq_sc;
   8572 
   8573 	if (sc->sc_type == WM_T_82574)
   8574 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8575 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8576 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8577 	else
   8578 		return rxq->rxq_descs[idx].wrx_errors;
   8579 }
   8580 
   8581 static inline uint16_t
   8582 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8583 {
   8584 	struct wm_softc *sc = rxq->rxq_sc;
   8585 
   8586 	if (sc->sc_type == WM_T_82574)
   8587 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8588 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8589 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8590 	else
   8591 		return rxq->rxq_descs[idx].wrx_special;
   8592 }
   8593 
   8594 static inline int
   8595 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8596 {
   8597 	struct wm_softc *sc = rxq->rxq_sc;
   8598 
   8599 	if (sc->sc_type == WM_T_82574)
   8600 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8601 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8602 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8603 	else
   8604 		return rxq->rxq_descs[idx].wrx_len;
   8605 }
   8606 
   8607 #ifdef WM_DEBUG
   8608 static inline uint32_t
   8609 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8610 {
   8611 	struct wm_softc *sc = rxq->rxq_sc;
   8612 
   8613 	if (sc->sc_type == WM_T_82574)
   8614 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8615 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8616 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8617 	else
   8618 		return 0;
   8619 }
   8620 
   8621 static inline uint8_t
   8622 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8623 {
   8624 	struct wm_softc *sc = rxq->rxq_sc;
   8625 
   8626 	if (sc->sc_type == WM_T_82574)
   8627 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8628 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8629 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8630 	else
   8631 		return 0;
   8632 }
   8633 #endif /* WM_DEBUG */
   8634 
   8635 static inline bool
   8636 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8637     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8638 {
   8639 
   8640 	if (sc->sc_type == WM_T_82574)
   8641 		return (status & ext_bit) != 0;
   8642 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8643 		return (status & nq_bit) != 0;
   8644 	else
   8645 		return (status & legacy_bit) != 0;
   8646 }
   8647 
   8648 static inline bool
   8649 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8650     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8651 {
   8652 
   8653 	if (sc->sc_type == WM_T_82574)
   8654 		return (error & ext_bit) != 0;
   8655 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8656 		return (error & nq_bit) != 0;
   8657 	else
   8658 		return (error & legacy_bit) != 0;
   8659 }
   8660 
   8661 static inline bool
   8662 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8663 {
   8664 
   8665 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8666 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8667 		return true;
   8668 	else
   8669 		return false;
   8670 }
   8671 
   8672 static inline bool
   8673 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8674 {
   8675 	struct wm_softc *sc = rxq->rxq_sc;
   8676 
   8677 	/* XXX missing error bit for newqueue? */
   8678 	if (wm_rxdesc_is_set_error(sc, errors,
   8679 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8680 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8681 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8682 		NQRXC_ERROR_RXE)) {
   8683 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8684 		    EXTRXC_ERROR_SE, 0))
   8685 			log(LOG_WARNING, "%s: symbol error\n",
   8686 			    device_xname(sc->sc_dev));
   8687 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8688 		    EXTRXC_ERROR_SEQ, 0))
   8689 			log(LOG_WARNING, "%s: receive sequence error\n",
   8690 			    device_xname(sc->sc_dev));
   8691 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8692 		    EXTRXC_ERROR_CE, 0))
   8693 			log(LOG_WARNING, "%s: CRC error\n",
   8694 			    device_xname(sc->sc_dev));
   8695 		return true;
   8696 	}
   8697 
   8698 	return false;
   8699 }
   8700 
   8701 static inline bool
   8702 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8703 {
   8704 	struct wm_softc *sc = rxq->rxq_sc;
   8705 
   8706 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8707 		NQRXC_STATUS_DD)) {
   8708 		/* We have processed all of the receive descriptors. */
   8709 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8710 		return false;
   8711 	}
   8712 
   8713 	return true;
   8714 }
   8715 
   8716 static inline bool
   8717 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8718     uint16_t vlantag, struct mbuf *m)
   8719 {
   8720 
   8721 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8722 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8723 		vlan_set_tag(m, le16toh(vlantag));
   8724 	}
   8725 
   8726 	return true;
   8727 }
   8728 
   8729 static inline void
   8730 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8731     uint32_t errors, struct mbuf *m)
   8732 {
   8733 	struct wm_softc *sc = rxq->rxq_sc;
   8734 
   8735 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8736 		if (wm_rxdesc_is_set_status(sc, status,
   8737 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8738 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8739 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8740 			if (wm_rxdesc_is_set_error(sc, errors,
   8741 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8742 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8743 		}
   8744 		if (wm_rxdesc_is_set_status(sc, status,
   8745 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8746 			/*
   8747 			 * Note: we don't know if this was TCP or UDP,
   8748 			 * so we just set both bits, and expect the
   8749 			 * upper layers to deal.
   8750 			 */
   8751 			WM_Q_EVCNT_INCR(rxq, tusum);
   8752 			m->m_pkthdr.csum_flags |=
   8753 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8754 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8755 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8756 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8757 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8758 		}
   8759 	}
   8760 }
   8761 
   8762 /*
   8763  * wm_rxeof:
   8764  *
   8765  *	Helper; handle receive interrupts.
   8766  */
   8767 static bool
   8768 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8769 {
   8770 	struct wm_softc *sc = rxq->rxq_sc;
   8771 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8772 	struct wm_rxsoft *rxs;
   8773 	struct mbuf *m;
   8774 	int i, len;
   8775 	int count = 0;
   8776 	uint32_t status, errors;
   8777 	uint16_t vlantag;
   8778 	bool more = false;
   8779 
   8780 	KASSERT(mutex_owned(rxq->rxq_lock));
   8781 
   8782 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8783 		if (limit-- == 0) {
   8784 			rxq->rxq_ptr = i;
   8785 			more = true;
   8786 			DPRINTF(WM_DEBUG_RX,
   8787 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8788 				device_xname(sc->sc_dev), i));
   8789 			break;
   8790 		}
   8791 
   8792 		rxs = &rxq->rxq_soft[i];
   8793 
   8794 		DPRINTF(WM_DEBUG_RX,
   8795 		    ("%s: RX: checking descriptor %d\n",
   8796 			device_xname(sc->sc_dev), i));
   8797 		wm_cdrxsync(rxq, i,
   8798 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8799 
   8800 		status = wm_rxdesc_get_status(rxq, i);
   8801 		errors = wm_rxdesc_get_errors(rxq, i);
   8802 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8803 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8804 #ifdef WM_DEBUG
   8805 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8806 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8807 #endif
   8808 
   8809 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8810 			/*
    8811 			 * Update the receive pointer while holding rxq_lock,
    8812 			 * keeping it consistent with the counters.
   8813 			 */
   8814 			rxq->rxq_ptr = i;
   8815 			break;
   8816 		}
   8817 
   8818 		count++;
   8819 		if (__predict_false(rxq->rxq_discard)) {
   8820 			DPRINTF(WM_DEBUG_RX,
   8821 			    ("%s: RX: discarding contents of descriptor %d\n",
   8822 				device_xname(sc->sc_dev), i));
   8823 			wm_init_rxdesc(rxq, i);
   8824 			if (wm_rxdesc_is_eop(rxq, status)) {
   8825 				/* Reset our state. */
   8826 				DPRINTF(WM_DEBUG_RX,
   8827 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8828 					device_xname(sc->sc_dev)));
   8829 				rxq->rxq_discard = 0;
   8830 			}
   8831 			continue;
   8832 		}
   8833 
   8834 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8835 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8836 
   8837 		m = rxs->rxs_mbuf;
   8838 
   8839 		/*
   8840 		 * Add a new receive buffer to the ring, unless of
   8841 		 * course the length is zero. Treat the latter as a
   8842 		 * failed mapping.
   8843 		 */
   8844 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8845 			/*
   8846 			 * Failed, throw away what we've done so
   8847 			 * far, and discard the rest of the packet.
   8848 			 */
   8849 			ifp->if_ierrors++;
   8850 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8851 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8852 			wm_init_rxdesc(rxq, i);
   8853 			if (!wm_rxdesc_is_eop(rxq, status))
   8854 				rxq->rxq_discard = 1;
   8855 			if (rxq->rxq_head != NULL)
   8856 				m_freem(rxq->rxq_head);
   8857 			WM_RXCHAIN_RESET(rxq);
   8858 			DPRINTF(WM_DEBUG_RX,
   8859 			    ("%s: RX: Rx buffer allocation failed, "
   8860 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8861 				rxq->rxq_discard ? " (discard)" : ""));
   8862 			continue;
   8863 		}
   8864 
   8865 		m->m_len = len;
   8866 		rxq->rxq_len += len;
   8867 		DPRINTF(WM_DEBUG_RX,
   8868 		    ("%s: RX: buffer at %p len %d\n",
   8869 			device_xname(sc->sc_dev), m->m_data, len));
   8870 
   8871 		/* If this is not the end of the packet, keep looking. */
   8872 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8873 			WM_RXCHAIN_LINK(rxq, m);
   8874 			DPRINTF(WM_DEBUG_RX,
   8875 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8876 				device_xname(sc->sc_dev), rxq->rxq_len));
   8877 			continue;
   8878 		}
   8879 
   8880 		/*
    8881 		 * Okay, we have the entire packet now. The chip is
    8882 		 * configured to include the FCS except on I350, I354 and
    8883 		 * I21[01] (not all chips can be configured to strip it),
    8884 		 * so we need to trim it. We may need to adjust the length
    8885 		 * of the previous mbuf in the chain if the current mbuf
    8886 		 * is too short.
    8887 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8888 		 * is always set on I350, so we don't trim the FCS there.
   8889 		 */
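		/*
		 * For example (a sketch): if the frame's final descriptor
		 * holds only 2 of the 4 FCS bytes, m->m_len (2) is less than
		 * ETHER_CRC_LEN (4), so the previous mbuf in the chain is
		 * shortened by the remaining 2 bytes and this mbuf's length
		 * is set to 0.
		 */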
   8890 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8891 		    && (sc->sc_type != WM_T_I210)
   8892 		    && (sc->sc_type != WM_T_I211)) {
   8893 			if (m->m_len < ETHER_CRC_LEN) {
   8894 				rxq->rxq_tail->m_len
   8895 				    -= (ETHER_CRC_LEN - m->m_len);
   8896 				m->m_len = 0;
   8897 			} else
   8898 				m->m_len -= ETHER_CRC_LEN;
   8899 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8900 		} else
   8901 			len = rxq->rxq_len;
   8902 
   8903 		WM_RXCHAIN_LINK(rxq, m);
   8904 
   8905 		*rxq->rxq_tailp = NULL;
   8906 		m = rxq->rxq_head;
   8907 
   8908 		WM_RXCHAIN_RESET(rxq);
   8909 
   8910 		DPRINTF(WM_DEBUG_RX,
   8911 		    ("%s: RX: have entire packet, len -> %d\n",
   8912 			device_xname(sc->sc_dev), len));
   8913 
   8914 		/* If an error occurred, update stats and drop the packet. */
   8915 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8916 			m_freem(m);
   8917 			continue;
   8918 		}
   8919 
   8920 		/* No errors.  Receive the packet. */
   8921 		m_set_rcvif(m, ifp);
   8922 		m->m_pkthdr.len = len;
   8923 		/*
    8924 		 * TODO
    8925 		 * We should save rsshash and rsstype in this mbuf.
   8926 		 */
   8927 		DPRINTF(WM_DEBUG_RX,
   8928 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8929 			device_xname(sc->sc_dev), rsstype, rsshash));
   8930 
   8931 		/*
   8932 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8933 		 * for us.  Associate the tag with the packet.
   8934 		 */
   8935 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8936 			continue;
   8937 
   8938 		/* Set up checksum info for this packet. */
   8939 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8940 		/*
    8941 		 * Update the receive pointer while holding rxq_lock, keeping
    8942 		 * it consistent with the counters.
   8943 		 */
   8944 		rxq->rxq_ptr = i;
   8945 		rxq->rxq_packets++;
   8946 		rxq->rxq_bytes += len;
   8947 		mutex_exit(rxq->rxq_lock);
   8948 
   8949 		/* Pass it on. */
   8950 		if_percpuq_enqueue(sc->sc_ipq, m);
   8951 
   8952 		mutex_enter(rxq->rxq_lock);
   8953 
   8954 		if (rxq->rxq_stopping)
   8955 			break;
   8956 	}
   8957 
   8958 	DPRINTF(WM_DEBUG_RX,
   8959 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8960 
   8961 	return more;
   8962 }
   8963 
   8964 /*
   8965  * wm_linkintr_gmii:
   8966  *
   8967  *	Helper; handle link interrupts for GMII.
   8968  */
   8969 static void
   8970 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8971 {
   8972 	device_t dev = sc->sc_dev;
   8973 	uint32_t status, reg;
   8974 	bool link;
   8975 	int rv;
   8976 
   8977 	KASSERT(WM_CORE_LOCKED(sc));
   8978 
   8979 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8980 		__func__));
   8981 
   8982 	if ((icr & ICR_LSC) == 0) {
   8983 		if (icr & ICR_RXSEQ)
   8984 			DPRINTF(WM_DEBUG_LINK,
   8985 			    ("%s: LINK Receive sequence error\n",
   8986 				device_xname(dev)));
   8987 		return;
   8988 	}
   8989 
   8990 	/* Link status changed */
   8991 	status = CSR_READ(sc, WMREG_STATUS);
   8992 	link = status & STATUS_LU;
   8993 	if (link) {
   8994 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8995 			device_xname(dev),
   8996 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8997 	} else {
   8998 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8999 			device_xname(dev)));
   9000 	}
   9001 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9002 		wm_gig_downshift_workaround_ich8lan(sc);
   9003 
   9004 	if ((sc->sc_type == WM_T_ICH8)
   9005 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9006 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9007 	}
   9008 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9009 		device_xname(dev)));
   9010 	mii_pollstat(&sc->sc_mii);
   9011 	if (sc->sc_type == WM_T_82543) {
   9012 		int miistatus, active;
   9013 
   9014 		/*
    9015 		 * With the 82543, we need to force the MAC's
    9016 		 * speed and duplex settings to match the PHY's
    9017 		 * speed and duplex configuration.
   9018 		 */
   9019 		miistatus = sc->sc_mii.mii_media_status;
   9020 
   9021 		if (miistatus & IFM_ACTIVE) {
   9022 			active = sc->sc_mii.mii_media_active;
   9023 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9024 			switch (IFM_SUBTYPE(active)) {
   9025 			case IFM_10_T:
   9026 				sc->sc_ctrl |= CTRL_SPEED_10;
   9027 				break;
   9028 			case IFM_100_TX:
   9029 				sc->sc_ctrl |= CTRL_SPEED_100;
   9030 				break;
   9031 			case IFM_1000_T:
   9032 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9033 				break;
   9034 			default:
   9035 				/*
   9036 				 * Fiber?
   9037 				 * Shoud not enter here.
    9038 				 * Should not get here.
   9039 				device_printf(dev, "unknown media (%x)\n",
   9040 				    active);
   9041 				break;
   9042 			}
   9043 			if (active & IFM_FDX)
   9044 				sc->sc_ctrl |= CTRL_FD;
   9045 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9046 		}
   9047 	} else if (sc->sc_type == WM_T_PCH) {
   9048 		wm_k1_gig_workaround_hv(sc,
   9049 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9050 	}
   9051 
   9052 	/*
   9053 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9054 	 * aggressive resulting in many collisions. To avoid this, increase
   9055 	 * the IPG and reduce Rx latency in the PHY.
   9056 	 */
   9057 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9058 	    && link) {
   9059 		uint32_t tipg_reg;
   9060 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9061 		bool fdx;
   9062 		uint16_t emi_addr, emi_val;
   9063 
   9064 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9065 		tipg_reg &= ~TIPG_IPGT_MASK;
   9066 		fdx = status & STATUS_FD;
   9067 
   9068 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9069 			tipg_reg |= 0xff;
   9070 			/* Reduce Rx latency in analog PHY */
   9071 			emi_val = 0;
   9072 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9073 		    fdx && speed != STATUS_SPEED_1000) {
   9074 			tipg_reg |= 0xc;
   9075 			emi_val = 1;
   9076 		} else {
    9077 			/* Roll back to the default values */
   9078 			tipg_reg |= 0x08;
   9079 			emi_val = 1;
   9080 		}
   9081 
   9082 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9083 
   9084 		rv = sc->phy.acquire(sc);
   9085 		if (rv)
   9086 			return;
   9087 
   9088 		if (sc->sc_type == WM_T_PCH2)
   9089 			emi_addr = I82579_RX_CONFIG;
   9090 		else
   9091 			emi_addr = I217_RX_CONFIG;
   9092 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9093 
   9094 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9095 			uint16_t phy_reg;
   9096 
   9097 			sc->phy.readreg_locked(dev, 2,
   9098 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9099 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9100 			if (speed == STATUS_SPEED_100
   9101 			    || speed == STATUS_SPEED_10)
   9102 				phy_reg |= 0x3e8;
   9103 			else
   9104 				phy_reg |= 0xfa;
   9105 			sc->phy.writereg_locked(dev, 2,
   9106 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9107 
   9108 			if (speed == STATUS_SPEED_1000) {
   9109 				sc->phy.readreg_locked(dev, 2,
   9110 				    HV_PM_CTRL, &phy_reg);
   9111 
   9112 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9113 
   9114 				sc->phy.writereg_locked(dev, 2,
   9115 				    HV_PM_CTRL, phy_reg);
   9116 			}
   9117 		}
   9118 		sc->phy.release(sc);
   9119 
   9120 		if (rv)
   9121 			return;
   9122 
   9123 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9124 			uint16_t data, ptr_gap;
   9125 
   9126 			if (speed == STATUS_SPEED_1000) {
   9127 				rv = sc->phy.acquire(sc);
   9128 				if (rv)
   9129 					return;
   9130 
   9131 				rv = sc->phy.readreg_locked(dev, 2,
   9132 				    I219_UNKNOWN1, &data);
   9133 				if (rv) {
   9134 					sc->phy.release(sc);
   9135 					return;
   9136 				}
   9137 
   9138 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9139 				if (ptr_gap < 0x18) {
   9140 					data &= ~(0x3ff << 2);
   9141 					data |= (0x18 << 2);
   9142 					rv = sc->phy.writereg_locked(dev,
   9143 					    2, I219_UNKNOWN1, data);
   9144 				}
   9145 				sc->phy.release(sc);
   9146 				if (rv)
   9147 					return;
   9148 			} else {
   9149 				rv = sc->phy.acquire(sc);
   9150 				if (rv)
   9151 					return;
   9152 
   9153 				rv = sc->phy.writereg_locked(dev, 2,
   9154 				    I219_UNKNOWN1, 0xc023);
   9155 				sc->phy.release(sc);
   9156 				if (rv)
   9157 					return;
   9158 
   9159 			}
   9160 		}
   9161 	}
   9162 
   9163 	/*
    9164 	 * I217 packet loss issue:
    9165 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9166 	 * on power-up.
    9167 	 * Set the Beacon Duration for I217 to 8 usec.
   9168 	 */
   9169 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9170 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9171 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9172 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9173 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9174 	}
   9175 
   9176 	/* Work-around I218 hang issue */
   9177 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9178 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9179 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9180 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9181 		wm_k1_workaround_lpt_lp(sc, link);
   9182 
   9183 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9184 		/*
   9185 		 * Set platform power management values for Latency
   9186 		 * Tolerance Reporting (LTR)
   9187 		 */
   9188 		wm_platform_pm_pch_lpt(sc,
   9189 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9190 	}
   9191 
   9192 	/* Clear link partner's EEE ability */
   9193 	sc->eee_lp_ability = 0;
   9194 
   9195 	/* FEXTNVM6 K1-off workaround */
   9196 	if (sc->sc_type == WM_T_PCH_SPT) {
   9197 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9198 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9199 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9200 		else
   9201 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9202 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9203 	}
   9204 
   9205 	if (!link)
   9206 		return;
   9207 
   9208 	switch (sc->sc_type) {
   9209 	case WM_T_PCH2:
   9210 		wm_k1_workaround_lv(sc);
   9211 		/* FALLTHROUGH */
   9212 	case WM_T_PCH:
   9213 		if (sc->sc_phytype == WMPHY_82578)
   9214 			wm_link_stall_workaround_hv(sc);
   9215 		break;
   9216 	default:
   9217 		break;
   9218 	}
   9219 
   9220 	/* Enable/Disable EEE after link up */
   9221 	if (sc->sc_phytype > WMPHY_82579)
   9222 		wm_set_eee_pchlan(sc);
   9223 }
   9224 
   9225 /*
   9226  * wm_linkintr_tbi:
   9227  *
   9228  *	Helper; handle link interrupts for TBI mode.
   9229  */
   9230 static void
   9231 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9232 {
   9233 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9234 	uint32_t status;
   9235 
   9236 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9237 		__func__));
   9238 
   9239 	status = CSR_READ(sc, WMREG_STATUS);
   9240 	if (icr & ICR_LSC) {
   9241 		wm_check_for_link(sc);
   9242 		if (status & STATUS_LU) {
   9243 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9244 				device_xname(sc->sc_dev),
   9245 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9246 			/*
   9247 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9248 			 * so we should update sc->sc_ctrl
   9249 			 */
   9250 
   9251 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9252 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9253 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9254 			if (status & STATUS_FD)
   9255 				sc->sc_tctl |=
   9256 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9257 			else
   9258 				sc->sc_tctl |=
   9259 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9260 			if (sc->sc_ctrl & CTRL_TFCE)
   9261 				sc->sc_fcrtl |= FCRTL_XONE;
   9262 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9263 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9264 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9265 			sc->sc_tbi_linkup = 1;
   9266 			if_link_state_change(ifp, LINK_STATE_UP);
   9267 		} else {
   9268 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9269 				device_xname(sc->sc_dev)));
   9270 			sc->sc_tbi_linkup = 0;
   9271 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9272 		}
   9273 		/* Update LED */
   9274 		wm_tbi_serdes_set_linkled(sc);
   9275 	} else if (icr & ICR_RXSEQ)
   9276 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9277 			device_xname(sc->sc_dev)));
   9278 }
   9279 
   9280 /*
   9281  * wm_linkintr_serdes:
   9282  *
    9283  *	Helper; handle link interrupts for SERDES mode.
   9284  */
   9285 static void
   9286 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9287 {
   9288 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9289 	struct mii_data *mii = &sc->sc_mii;
   9290 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9291 	uint32_t pcs_adv, pcs_lpab, reg;
   9292 
   9293 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9294 		__func__));
   9295 
   9296 	if (icr & ICR_LSC) {
   9297 		/* Check PCS */
   9298 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9299 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9300 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9301 				device_xname(sc->sc_dev)));
   9302 			mii->mii_media_status |= IFM_ACTIVE;
   9303 			sc->sc_tbi_linkup = 1;
   9304 			if_link_state_change(ifp, LINK_STATE_UP);
   9305 		} else {
   9306 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9307 				device_xname(sc->sc_dev)));
   9308 			mii->mii_media_status |= IFM_NONE;
   9309 			sc->sc_tbi_linkup = 0;
   9310 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9311 			wm_tbi_serdes_set_linkled(sc);
   9312 			return;
   9313 		}
   9314 		mii->mii_media_active |= IFM_1000_SX;
   9315 		if ((reg & PCS_LSTS_FDX) != 0)
   9316 			mii->mii_media_active |= IFM_FDX;
   9317 		else
   9318 			mii->mii_media_active |= IFM_HDX;
   9319 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9320 			/* Check flow */
   9321 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9322 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9323 				DPRINTF(WM_DEBUG_LINK,
   9324 				    ("XXX LINKOK but not ACOMP\n"));
   9325 				return;
   9326 			}
   9327 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9328 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9329 			DPRINTF(WM_DEBUG_LINK,
   9330 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
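			/*
			 * Resolve flow control from the advertised and
			 * link-partner pause bits, following the 802.3
			 * Annex 28B priority resolution: symmetric pause on
			 * both sides enables Tx and Rx pause, otherwise a
			 * matching asymmetric pair enables pause in one
			 * direction only.
			 */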
   9331 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9332 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9333 				mii->mii_media_active |= IFM_FLOW
   9334 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9335 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9336 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9337 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9338 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9339 				mii->mii_media_active |= IFM_FLOW
   9340 				    | IFM_ETH_TXPAUSE;
   9341 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9342 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9343 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9344 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9345 				mii->mii_media_active |= IFM_FLOW
   9346 				    | IFM_ETH_RXPAUSE;
   9347 		}
   9348 		/* Update LED */
   9349 		wm_tbi_serdes_set_linkled(sc);
   9350 	} else
   9351 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9352 		    device_xname(sc->sc_dev)));
   9353 }
   9354 
   9355 /*
   9356  * wm_linkintr:
   9357  *
   9358  *	Helper; handle link interrupts.
   9359  */
   9360 static void
   9361 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9362 {
   9363 
   9364 	KASSERT(WM_CORE_LOCKED(sc));
   9365 
   9366 	if (sc->sc_flags & WM_F_HAS_MII)
   9367 		wm_linkintr_gmii(sc, icr);
   9368 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9369 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9370 		wm_linkintr_serdes(sc, icr);
   9371 	else
   9372 		wm_linkintr_tbi(sc, icr);
   9373 }
   9374 
   9375 /*
   9376  * wm_intr_legacy:
   9377  *
   9378  *	Interrupt service routine for INTx and MSI.
   9379  */
   9380 static int
   9381 wm_intr_legacy(void *arg)
   9382 {
   9383 	struct wm_softc *sc = arg;
   9384 	struct wm_queue *wmq = &sc->sc_queue[0];
   9385 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9386 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9387 	uint32_t icr, rndval = 0;
   9388 	int handled = 0;
   9389 
   9390 	while (1 /* CONSTCOND */) {
   9391 		icr = CSR_READ(sc, WMREG_ICR);
   9392 		if ((icr & sc->sc_icr) == 0)
   9393 			break;
   9394 		if (handled == 0)
   9395 			DPRINTF(WM_DEBUG_TX,
    9396 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9397 		if (rndval == 0)
   9398 			rndval = icr;
   9399 
   9400 		mutex_enter(rxq->rxq_lock);
   9401 
   9402 		if (rxq->rxq_stopping) {
   9403 			mutex_exit(rxq->rxq_lock);
   9404 			break;
   9405 		}
   9406 
   9407 		handled = 1;
   9408 
   9409 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9410 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9411 			DPRINTF(WM_DEBUG_RX,
   9412 			    ("%s: RX: got Rx intr 0x%08x\n",
   9413 				device_xname(sc->sc_dev),
   9414 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9415 			WM_Q_EVCNT_INCR(rxq, intr);
   9416 		}
   9417 #endif
   9418 		/*
   9419 		 * wm_rxeof() does *not* call upper layer functions directly,
    9420 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9421 		 * So, we can call wm_rxeof() in interrupt context.
   9422 		 */
   9423 		wm_rxeof(rxq, UINT_MAX);
   9424 		/* Fill lower bits with RX index. See below for the upper. */
   9425 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9426 
   9427 		mutex_exit(rxq->rxq_lock);
   9428 		mutex_enter(txq->txq_lock);
   9429 
   9430 		if (txq->txq_stopping) {
   9431 			mutex_exit(txq->txq_lock);
   9432 			break;
   9433 		}
   9434 
   9435 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9436 		if (icr & ICR_TXDW) {
   9437 			DPRINTF(WM_DEBUG_TX,
   9438 			    ("%s: TX: got TXDW interrupt\n",
   9439 				device_xname(sc->sc_dev)));
   9440 			WM_Q_EVCNT_INCR(txq, txdw);
   9441 		}
   9442 #endif
   9443 		wm_txeof(txq, UINT_MAX);
   9444 		/* Fill upper bits with TX index. See above for the lower. */
    9445 		rndval |= txq->txq_next * WM_NRXDESC;
   9446 
   9447 		mutex_exit(txq->txq_lock);
   9448 		WM_CORE_LOCK(sc);
   9449 
   9450 		if (sc->sc_core_stopping) {
   9451 			WM_CORE_UNLOCK(sc);
   9452 			break;
   9453 		}
   9454 
   9455 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9456 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9457 			wm_linkintr(sc, icr);
   9458 		}
   9459 
   9460 		WM_CORE_UNLOCK(sc);
   9461 
   9462 		if (icr & ICR_RXO) {
   9463 #if defined(WM_DEBUG)
   9464 			log(LOG_WARNING, "%s: Receive overrun\n",
   9465 			    device_xname(sc->sc_dev));
   9466 #endif /* defined(WM_DEBUG) */
   9467 		}
   9468 	}
   9469 
   9470 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9471 
   9472 	if (handled) {
   9473 		/* Try to get more packets going. */
   9474 		softint_schedule(wmq->wmq_si);
   9475 	}
   9476 
   9477 	return handled;
   9478 }
   9479 
   9480 static inline void
   9481 wm_txrxintr_disable(struct wm_queue *wmq)
   9482 {
   9483 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9484 
   9485 	if (sc->sc_type == WM_T_82574)
   9486 		CSR_WRITE(sc, WMREG_IMC,
   9487 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9488 	else if (sc->sc_type == WM_T_82575)
   9489 		CSR_WRITE(sc, WMREG_EIMC,
   9490 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9491 	else
   9492 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9493 }
   9494 
   9495 static inline void
   9496 wm_txrxintr_enable(struct wm_queue *wmq)
   9497 {
   9498 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9499 
   9500 	wm_itrs_calculate(sc, wmq);
   9501 
   9502 	/*
    9503 	 * ICR_OTHER, which was disabled in wm_linkintr_msix(), is re-enabled
    9504 	 * here.  It does not matter whether RXQ(0) or RXQ(1) re-enables
    9505 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9506 	 * while its wm_handle_queue(wmq) is running.
   9507 	 */
   9508 	if (sc->sc_type == WM_T_82574)
   9509 		CSR_WRITE(sc, WMREG_IMS,
   9510 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9511 	else if (sc->sc_type == WM_T_82575)
   9512 		CSR_WRITE(sc, WMREG_EIMS,
   9513 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9514 	else
   9515 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9516 }
   9517 
   9518 static int
   9519 wm_txrxintr_msix(void *arg)
   9520 {
   9521 	struct wm_queue *wmq = arg;
   9522 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9523 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9524 	struct wm_softc *sc = txq->txq_sc;
   9525 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9526 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9527 	uint32_t rndval = 0;
   9528 	bool txmore;
   9529 	bool rxmore;
   9530 
   9531 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9532 
   9533 	DPRINTF(WM_DEBUG_TX,
   9534 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9535 
   9536 	wm_txrxintr_disable(wmq);
   9537 
   9538 	mutex_enter(txq->txq_lock);
   9539 
   9540 	if (txq->txq_stopping) {
   9541 		mutex_exit(txq->txq_lock);
   9542 		return 0;
   9543 	}
   9544 
   9545 	WM_Q_EVCNT_INCR(txq, txdw);
   9546 	txmore = wm_txeof(txq, txlimit);
   9547 	/* Fill upper bits with TX index. See below for the lower. */
   9548 	rndval = txq->txq_next * WM_NRXDESC;
   9549 	/* wm_deferred start() is done in wm_handle_queue(). */
   9550 	mutex_exit(txq->txq_lock);
   9551 
   9552 	DPRINTF(WM_DEBUG_RX,
   9553 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9554 	mutex_enter(rxq->rxq_lock);
   9555 
   9556 	if (rxq->rxq_stopping) {
   9557 		mutex_exit(rxq->rxq_lock);
   9558 		return 0;
   9559 	}
   9560 
   9561 	WM_Q_EVCNT_INCR(rxq, intr);
   9562 	rxmore = wm_rxeof(rxq, rxlimit);
   9563 
   9564 	/* Fill lower bits with RX index. See above for the upper. */
   9565 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9566 	mutex_exit(rxq->rxq_lock);
   9567 
   9568 	wm_itrs_writereg(sc, wmq);
   9569 
   9570 	/*
    9571 	 * This function is called in hardware interrupt context and runs
    9572 	 * per-CPU, so it is not required to take a lock.
   9573 	 */
   9574 	if (rndval != 0)
   9575 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9576 
   9577 	if (txmore || rxmore)
   9578 		softint_schedule(wmq->wmq_si);
   9579 	else
   9580 		wm_txrxintr_enable(wmq);
   9581 
   9582 	return 1;
   9583 }
   9584 
   9585 static void
   9586 wm_handle_queue(void *arg)
   9587 {
   9588 	struct wm_queue *wmq = arg;
   9589 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9590 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9591 	struct wm_softc *sc = txq->txq_sc;
   9592 	u_int txlimit = sc->sc_tx_process_limit;
   9593 	u_int rxlimit = sc->sc_rx_process_limit;
   9594 	bool txmore;
   9595 	bool rxmore;
   9596 
   9597 	mutex_enter(txq->txq_lock);
   9598 	if (txq->txq_stopping) {
   9599 		mutex_exit(txq->txq_lock);
   9600 		return;
   9601 	}
   9602 	txmore = wm_txeof(txq, txlimit);
   9603 	wm_deferred_start_locked(txq);
   9604 	mutex_exit(txq->txq_lock);
   9605 
   9606 	mutex_enter(rxq->rxq_lock);
   9607 	if (rxq->rxq_stopping) {
   9608 		mutex_exit(rxq->rxq_lock);
   9609 		return;
   9610 	}
   9611 	WM_Q_EVCNT_INCR(rxq, defer);
   9612 	rxmore = wm_rxeof(rxq, rxlimit);
   9613 	mutex_exit(rxq->rxq_lock);
   9614 
   9615 	if (txmore || rxmore)
   9616 		softint_schedule(wmq->wmq_si);
   9617 	else
   9618 		wm_txrxintr_enable(wmq);
   9619 }
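
/*
 * Illustrative sketch (not compiled) of the two-stage processing that
 * wm_txrxintr_msix() and wm_handle_queue() implement together: the hard
 * interrupt masks its queue's vector and processes a small batch
 * (sc_tx/rx_intr_process_limit); any backlog is continued in softint
 * context with the larger sc_tx/rx_process_limit, and whichever side
 * drains the backlog re-enables the vector:
 */
#if 0
	wm_txrxintr_disable(wmq);		/* mask this queue's vector */
	txmore = wm_txeof(txq, sc->sc_tx_intr_process_limit);
	rxmore = wm_rxeof(rxq, sc->sc_rx_intr_process_limit);
	if (txmore || rxmore)
		softint_schedule(wmq->wmq_si);	/* -> wm_handle_queue() */
	else
		wm_txrxintr_enable(wmq);	/* done; unmask the vector */
#endif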
   9620 
   9621 /*
   9622  * wm_linkintr_msix:
   9623  *
   9624  *	Interrupt service routine for link status change for MSI-X.
   9625  */
   9626 static int
   9627 wm_linkintr_msix(void *arg)
   9628 {
   9629 	struct wm_softc *sc = arg;
   9630 	uint32_t reg;
    9631 	bool has_rxo = false;
   9632 
   9633 	reg = CSR_READ(sc, WMREG_ICR);
   9634 	WM_CORE_LOCK(sc);
   9635 	DPRINTF(WM_DEBUG_LINK,
   9636 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9637 		device_xname(sc->sc_dev), reg));
   9638 
   9639 	if (sc->sc_core_stopping)
   9640 		goto out;
   9641 
   9642 	if ((reg & ICR_LSC) != 0) {
   9643 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9644 		wm_linkintr(sc, ICR_LSC);
   9645 	}
   9646 
   9647 	/*
   9648 	 * XXX 82574 MSI-X mode workaround
   9649 	 *
    9650 	 * In MSI-X mode, the 82574 reports a receive overrun (RXO) on the
    9651 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
    9652 	 * ICR_RXQ(1) vector.  So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9653 	 * interrupts by writing WMREG_ICS, to get received packets processed.
   9654 	 */
   9655 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9656 #if defined(WM_DEBUG)
   9657 		log(LOG_WARNING, "%s: Receive overrun\n",
   9658 		    device_xname(sc->sc_dev));
   9659 #endif /* defined(WM_DEBUG) */
   9660 
   9661 		has_rxo = true;
   9662 		/*
    9663 		 * The RXO interrupt fires at a very high rate under heavy
    9664 		 * receive traffic, so use polling mode for ICR_OTHER just as
    9665 		 * for the Tx/Rx interrupts.  ICR_OTHER is re-enabled at the
    9666 		 * end of wm_txrxintr_msix(), which is kicked by both the
    9667 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9668 		 */
   9669 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9670 
   9671 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9672 	}
   9673 
    9674 
   9676 out:
   9677 	WM_CORE_UNLOCK(sc);
   9678 
   9679 	if (sc->sc_type == WM_T_82574) {
   9680 		if (!has_rxo)
   9681 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9682 		else
   9683 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9684 	} else if (sc->sc_type == WM_T_82575)
   9685 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9686 	else
   9687 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9688 
   9689 	return 1;
   9690 }
   9691 
   9692 /*
   9693  * Media related.
   9694  * GMII, SGMII, TBI (and SERDES)
   9695  */
   9696 
   9697 /* Common */
   9698 
   9699 /*
   9700  * wm_tbi_serdes_set_linkled:
   9701  *
   9702  *	Update the link LED on TBI and SERDES devices.
   9703  */
   9704 static void
   9705 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9706 {
   9707 
   9708 	if (sc->sc_tbi_linkup)
   9709 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9710 	else
   9711 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9712 
   9713 	/* 82540 or newer devices are active low */
   9714 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9715 
   9716 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9717 }
   9718 
   9719 /* GMII related */
   9720 
   9721 /*
   9722  * wm_gmii_reset:
   9723  *
   9724  *	Reset the PHY.
   9725  */
   9726 static void
   9727 wm_gmii_reset(struct wm_softc *sc)
   9728 {
   9729 	uint32_t reg;
   9730 	int rv;
   9731 
   9732 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9733 		device_xname(sc->sc_dev), __func__));
   9734 
   9735 	rv = sc->phy.acquire(sc);
   9736 	if (rv != 0) {
   9737 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9738 		    __func__);
   9739 		return;
   9740 	}
   9741 
   9742 	switch (sc->sc_type) {
   9743 	case WM_T_82542_2_0:
   9744 	case WM_T_82542_2_1:
   9745 		/* null */
   9746 		break;
   9747 	case WM_T_82543:
   9748 		/*
   9749 		 * With 82543, we need to force speed and duplex on the MAC
   9750 		 * equal to what the PHY speed and duplex configuration is.
   9751 		 * In addition, we need to perform a hardware reset on the PHY
   9752 		 * to take it out of reset.
   9753 		 */
   9754 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9755 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9756 
   9757 		/* The PHY reset pin is active-low. */
   9758 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9759 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9760 		    CTRL_EXT_SWDPIN(4));
   9761 		reg |= CTRL_EXT_SWDPIO(4);
   9762 
   9763 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9764 		CSR_WRITE_FLUSH(sc);
   9765 		delay(10*1000);
   9766 
   9767 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9768 		CSR_WRITE_FLUSH(sc);
   9769 		delay(150);
   9770 #if 0
   9771 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9772 #endif
   9773 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9774 		break;
   9775 	case WM_T_82544:	/* Reset 10000us */
   9776 	case WM_T_82540:
   9777 	case WM_T_82545:
   9778 	case WM_T_82545_3:
   9779 	case WM_T_82546:
   9780 	case WM_T_82546_3:
   9781 	case WM_T_82541:
   9782 	case WM_T_82541_2:
   9783 	case WM_T_82547:
   9784 	case WM_T_82547_2:
   9785 	case WM_T_82571:	/* Reset 100us */
   9786 	case WM_T_82572:
   9787 	case WM_T_82573:
   9788 	case WM_T_82574:
   9789 	case WM_T_82575:
   9790 	case WM_T_82576:
   9791 	case WM_T_82580:
   9792 	case WM_T_I350:
   9793 	case WM_T_I354:
   9794 	case WM_T_I210:
   9795 	case WM_T_I211:
   9796 	case WM_T_82583:
   9797 	case WM_T_80003:
   9798 		/* Generic reset */
   9799 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9800 		CSR_WRITE_FLUSH(sc);
   9801 		delay(20000);
   9802 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9803 		CSR_WRITE_FLUSH(sc);
   9804 		delay(20000);
   9805 
   9806 		if ((sc->sc_type == WM_T_82541)
   9807 		    || (sc->sc_type == WM_T_82541_2)
   9808 		    || (sc->sc_type == WM_T_82547)
   9809 		    || (sc->sc_type == WM_T_82547_2)) {
    9810 			/* Workarounds for IGP are done in igp_reset() */
   9811 			/* XXX add code to set LED after phy reset */
   9812 		}
   9813 		break;
   9814 	case WM_T_ICH8:
   9815 	case WM_T_ICH9:
   9816 	case WM_T_ICH10:
   9817 	case WM_T_PCH:
   9818 	case WM_T_PCH2:
   9819 	case WM_T_PCH_LPT:
   9820 	case WM_T_PCH_SPT:
   9821 	case WM_T_PCH_CNP:
   9822 		/* Generic reset */
   9823 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9824 		CSR_WRITE_FLUSH(sc);
   9825 		delay(100);
   9826 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9827 		CSR_WRITE_FLUSH(sc);
   9828 		delay(150);
   9829 		break;
   9830 	default:
   9831 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9832 		    __func__);
   9833 		break;
   9834 	}
   9835 
   9836 	sc->phy.release(sc);
   9837 
   9838 	/* get_cfg_done */
   9839 	wm_get_cfg_done(sc);
   9840 
   9841 	/* Extra setup */
   9842 	switch (sc->sc_type) {
   9843 	case WM_T_82542_2_0:
   9844 	case WM_T_82542_2_1:
   9845 	case WM_T_82543:
   9846 	case WM_T_82544:
   9847 	case WM_T_82540:
   9848 	case WM_T_82545:
   9849 	case WM_T_82545_3:
   9850 	case WM_T_82546:
   9851 	case WM_T_82546_3:
   9852 	case WM_T_82541_2:
   9853 	case WM_T_82547_2:
   9854 	case WM_T_82571:
   9855 	case WM_T_82572:
   9856 	case WM_T_82573:
   9857 	case WM_T_82574:
   9858 	case WM_T_82583:
   9859 	case WM_T_82575:
   9860 	case WM_T_82576:
   9861 	case WM_T_82580:
   9862 	case WM_T_I350:
   9863 	case WM_T_I354:
   9864 	case WM_T_I210:
   9865 	case WM_T_I211:
   9866 	case WM_T_80003:
   9867 		/* Null */
   9868 		break;
   9869 	case WM_T_82541:
   9870 	case WM_T_82547:
    9871 		/* XXX Actively configure the LED after PHY reset */
   9872 		break;
   9873 	case WM_T_ICH8:
   9874 	case WM_T_ICH9:
   9875 	case WM_T_ICH10:
   9876 	case WM_T_PCH:
   9877 	case WM_T_PCH2:
   9878 	case WM_T_PCH_LPT:
   9879 	case WM_T_PCH_SPT:
   9880 	case WM_T_PCH_CNP:
   9881 		wm_phy_post_reset(sc);
   9882 		break;
   9883 	default:
   9884 		panic("%s: unknown type\n", __func__);
   9885 		break;
   9886 	}
   9887 }
   9888 
   9889 /*
   9890  * Setup sc_phytype and mii_{read|write}reg.
   9891  *
    9892  *  To identify the PHY type, the correct read/write functions must be
    9893  * selected, and to select them the PCI ID or the MAC type must be used,
    9894  * since the PHY registers cannot be accessed yet.
    9895  *
    9896  *  On the first call of this function, the PHY ID is not yet known, so
    9897  * the PCI ID or the MAC type is checked.  The list of PCI IDs may not be
    9898  * complete, so the result might be incorrect.
    9899  *
    9900  *  On the second call, the PHY OUI and model are used to identify the
    9901  * PHY type.  The result might still be imperfect because some entries
    9902  * are missing from the comparison, but it is better than the first call.
    9903  *
    9904  *  If the newly detected result differs from the previous assumption, a
    9905  * diagnostic message is printed.
   9906  */
   9907 static void
   9908 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9909     uint16_t phy_model)
   9910 {
   9911 	device_t dev = sc->sc_dev;
   9912 	struct mii_data *mii = &sc->sc_mii;
   9913 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9914 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9915 	mii_readreg_t new_readreg;
   9916 	mii_writereg_t new_writereg;
   9917 
   9918 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9919 		device_xname(sc->sc_dev), __func__));
   9920 
   9921 	if (mii->mii_readreg == NULL) {
   9922 		/*
   9923 		 *  This is the first call of this function. For ICH and PCH
   9924 		 * variants, it's difficult to determine the PHY access method
   9925 		 * by sc_type, so use the PCI product ID for some devices.
   9926 		 */
   9927 
   9928 		switch (sc->sc_pcidevid) {
   9929 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9930 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9931 			/* 82577 */
   9932 			new_phytype = WMPHY_82577;
   9933 			break;
   9934 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9935 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9936 			/* 82578 */
   9937 			new_phytype = WMPHY_82578;
   9938 			break;
   9939 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9940 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9941 			/* 82579 */
   9942 			new_phytype = WMPHY_82579;
   9943 			break;
   9944 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9945 		case PCI_PRODUCT_INTEL_82801I_BM:
   9946 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9947 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9948 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9949 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9950 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9951 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9952 			/* ICH8, 9, 10 with 82567 */
   9953 			new_phytype = WMPHY_BM;
   9954 			break;
   9955 		default:
   9956 			break;
   9957 		}
   9958 	} else {
   9959 		/* It's not the first call. Use PHY OUI and model */
   9960 		switch (phy_oui) {
   9961 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9962 			switch (phy_model) {
   9963 			case 0x0004: /* XXX */
   9964 				new_phytype = WMPHY_82578;
   9965 				break;
   9966 			default:
   9967 				break;
   9968 			}
   9969 			break;
   9970 		case MII_OUI_xxMARVELL:
   9971 			switch (phy_model) {
   9972 			case MII_MODEL_xxMARVELL_I210:
   9973 				new_phytype = WMPHY_I210;
   9974 				break;
   9975 			case MII_MODEL_xxMARVELL_E1011:
   9976 			case MII_MODEL_xxMARVELL_E1000_3:
   9977 			case MII_MODEL_xxMARVELL_E1000_5:
   9978 			case MII_MODEL_xxMARVELL_E1112:
   9979 				new_phytype = WMPHY_M88;
   9980 				break;
   9981 			case MII_MODEL_xxMARVELL_E1149:
   9982 				new_phytype = WMPHY_BM;
   9983 				break;
   9984 			case MII_MODEL_xxMARVELL_E1111:
   9985 			case MII_MODEL_xxMARVELL_I347:
   9986 			case MII_MODEL_xxMARVELL_E1512:
   9987 			case MII_MODEL_xxMARVELL_E1340M:
   9988 			case MII_MODEL_xxMARVELL_E1543:
   9989 				new_phytype = WMPHY_M88;
   9990 				break;
   9991 			case MII_MODEL_xxMARVELL_I82563:
   9992 				new_phytype = WMPHY_GG82563;
   9993 				break;
   9994 			default:
   9995 				break;
   9996 			}
   9997 			break;
   9998 		case MII_OUI_INTEL:
   9999 			switch (phy_model) {
   10000 			case MII_MODEL_INTEL_I82577:
   10001 				new_phytype = WMPHY_82577;
   10002 				break;
   10003 			case MII_MODEL_INTEL_I82579:
   10004 				new_phytype = WMPHY_82579;
   10005 				break;
   10006 			case MII_MODEL_INTEL_I217:
   10007 				new_phytype = WMPHY_I217;
   10008 				break;
   10009 			case MII_MODEL_INTEL_I82580:
   10010 			case MII_MODEL_INTEL_I350:
   10011 				new_phytype = WMPHY_82580;
   10012 				break;
   10013 			default:
   10014 				break;
   10015 			}
   10016 			break;
   10017 		case MII_OUI_yyINTEL:
   10018 			switch (phy_model) {
   10019 			case MII_MODEL_yyINTEL_I82562G:
   10020 			case MII_MODEL_yyINTEL_I82562EM:
   10021 			case MII_MODEL_yyINTEL_I82562ET:
   10022 				new_phytype = WMPHY_IFE;
   10023 				break;
   10024 			case MII_MODEL_yyINTEL_IGP01E1000:
   10025 				new_phytype = WMPHY_IGP;
   10026 				break;
   10027 			case MII_MODEL_yyINTEL_I82566:
   10028 				new_phytype = WMPHY_IGP_3;
   10029 				break;
   10030 			default:
   10031 				break;
   10032 			}
   10033 			break;
   10034 		default:
   10035 			break;
   10036 		}
   10037 		if (new_phytype == WMPHY_UNKNOWN)
   10038 			aprint_verbose_dev(dev,
   10039 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10040 			    __func__, phy_oui, phy_model);
   10041 
   10042 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10043 		    && (sc->sc_phytype != new_phytype)) {
    10044 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    10045 			    "was incorrect. PHY type from PHY ID = %u\n",
   10046 			    sc->sc_phytype, new_phytype);
   10047 		}
   10048 	}
   10049 
   10050 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10051 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10052 		/* SGMII */
   10053 		new_readreg = wm_sgmii_readreg;
   10054 		new_writereg = wm_sgmii_writereg;
   10055 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10056 		/* BM2 (phyaddr == 1) */
   10057 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10058 		    && (new_phytype != WMPHY_BM)
   10059 		    && (new_phytype != WMPHY_UNKNOWN))
   10060 			doubt_phytype = new_phytype;
   10061 		new_phytype = WMPHY_BM;
   10062 		new_readreg = wm_gmii_bm_readreg;
   10063 		new_writereg = wm_gmii_bm_writereg;
   10064 	} else if (sc->sc_type >= WM_T_PCH) {
   10065 		/* All PCH* use _hv_ */
   10066 		new_readreg = wm_gmii_hv_readreg;
   10067 		new_writereg = wm_gmii_hv_writereg;
   10068 	} else if (sc->sc_type >= WM_T_ICH8) {
   10069 		/* non-82567 ICH8, 9 and 10 */
   10070 		new_readreg = wm_gmii_i82544_readreg;
   10071 		new_writereg = wm_gmii_i82544_writereg;
   10072 	} else if (sc->sc_type >= WM_T_80003) {
   10073 		/* 80003 */
   10074 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10075 		    && (new_phytype != WMPHY_GG82563)
   10076 		    && (new_phytype != WMPHY_UNKNOWN))
   10077 			doubt_phytype = new_phytype;
   10078 		new_phytype = WMPHY_GG82563;
   10079 		new_readreg = wm_gmii_i80003_readreg;
   10080 		new_writereg = wm_gmii_i80003_writereg;
   10081 	} else if (sc->sc_type >= WM_T_I210) {
   10082 		/* I210 and I211 */
   10083 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10084 		    && (new_phytype != WMPHY_I210)
   10085 		    && (new_phytype != WMPHY_UNKNOWN))
   10086 			doubt_phytype = new_phytype;
   10087 		new_phytype = WMPHY_I210;
   10088 		new_readreg = wm_gmii_gs40g_readreg;
   10089 		new_writereg = wm_gmii_gs40g_writereg;
   10090 	} else if (sc->sc_type >= WM_T_82580) {
   10091 		/* 82580, I350 and I354 */
   10092 		new_readreg = wm_gmii_82580_readreg;
   10093 		new_writereg = wm_gmii_82580_writereg;
   10094 	} else if (sc->sc_type >= WM_T_82544) {
    10095 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10096 		new_readreg = wm_gmii_i82544_readreg;
   10097 		new_writereg = wm_gmii_i82544_writereg;
   10098 	} else {
   10099 		new_readreg = wm_gmii_i82543_readreg;
   10100 		new_writereg = wm_gmii_i82543_writereg;
   10101 	}
   10102 
   10103 	if (new_phytype == WMPHY_BM) {
   10104 		/* All BM use _bm_ */
   10105 		new_readreg = wm_gmii_bm_readreg;
   10106 		new_writereg = wm_gmii_bm_writereg;
   10107 	}
   10108 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10109 		/* All PCH* use _hv_ */
   10110 		new_readreg = wm_gmii_hv_readreg;
   10111 		new_writereg = wm_gmii_hv_writereg;
   10112 	}
   10113 
   10114 	/* Diag output */
   10115 	if (doubt_phytype != WMPHY_UNKNOWN)
   10116 		aprint_error_dev(dev, "Assumed new PHY type was "
   10117 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10118 		    new_phytype);
   10119 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10120 	    && (sc->sc_phytype != new_phytype))
    10121 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    10122 		    "was incorrect. New PHY type = %u\n",
   10123 		    sc->sc_phytype, new_phytype);
   10124 
   10125 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10126 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10127 
   10128 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10129 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10130 		    "function was incorrect.\n");
   10131 
   10132 	/* Update now */
   10133 	sc->sc_phytype = new_phytype;
   10134 	mii->mii_readreg = new_readreg;
   10135 	mii->mii_writereg = new_writereg;
   10136 	if (new_readreg == wm_gmii_hv_readreg) {
   10137 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10138 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10139 	} else if (new_readreg == wm_sgmii_readreg) {
   10140 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10141 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10142 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10143 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10144 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10145 	}
   10146 }
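
/*
 * Illustrative sketch (not compiled) of the two-pass detection described
 * above.  The first call is made during attach (outside this excerpt)
 * before any PHY register can be read; the argument values shown for it
 * are illustrative only:
 */
#if 0
	/* 1st pass: mii->mii_readreg is still NULL, guess from the IDs. */
	wm_gmii_setup_phytype(sc, 0, 0);

	/* ... mii_attach() probes the bus using the guessed functions ... */

	/* 2nd pass: refine with the probed PHY's OUI and model, as done
	 * in wm_gmii_mediainit(). */
	child = LIST_FIRST(&mii->mii_phys);
	wm_gmii_setup_phytype(sc, child->mii_mpd_oui, child->mii_mpd_model);
#endif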
   10147 
   10148 /*
   10149  * wm_get_phy_id_82575:
   10150  *
   10151  * Return PHY ID. Return -1 if it failed.
   10152  */
   10153 static int
   10154 wm_get_phy_id_82575(struct wm_softc *sc)
   10155 {
   10156 	uint32_t reg;
   10157 	int phyid = -1;
   10158 
   10159 	/* XXX */
   10160 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10161 		return -1;
   10162 
   10163 	if (wm_sgmii_uses_mdio(sc)) {
   10164 		switch (sc->sc_type) {
   10165 		case WM_T_82575:
   10166 		case WM_T_82576:
   10167 			reg = CSR_READ(sc, WMREG_MDIC);
   10168 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10169 			break;
   10170 		case WM_T_82580:
   10171 		case WM_T_I350:
   10172 		case WM_T_I354:
   10173 		case WM_T_I210:
   10174 		case WM_T_I211:
   10175 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10176 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10177 			break;
   10178 		default:
   10179 			return -1;
   10180 		}
   10181 	}
   10182 
   10183 	return phyid;
   10184 }
    10185 
   10187 /*
   10188  * wm_gmii_mediainit:
   10189  *
   10190  *	Initialize media for use on 1000BASE-T devices.
   10191  */
   10192 static void
   10193 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10194 {
   10195 	device_t dev = sc->sc_dev;
   10196 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10197 	struct mii_data *mii = &sc->sc_mii;
   10198 	uint32_t reg;
   10199 
   10200 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10201 		device_xname(sc->sc_dev), __func__));
   10202 
   10203 	/* We have GMII. */
   10204 	sc->sc_flags |= WM_F_HAS_MII;
   10205 
   10206 	if (sc->sc_type == WM_T_80003)
   10207 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10208 	else
   10209 		sc->sc_tipg = TIPG_1000T_DFLT;
   10210 
   10211 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10212 	if ((sc->sc_type == WM_T_82580)
   10213 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10214 	    || (sc->sc_type == WM_T_I211)) {
   10215 		reg = CSR_READ(sc, WMREG_PHPM);
   10216 		reg &= ~PHPM_GO_LINK_D;
   10217 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10218 	}
   10219 
   10220 	/*
   10221 	 * Let the chip set speed/duplex on its own based on
   10222 	 * signals from the PHY.
   10223 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10224 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10225 	 */
   10226 	sc->sc_ctrl |= CTRL_SLU;
   10227 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10228 
   10229 	/* Initialize our media structures and probe the GMII. */
   10230 	mii->mii_ifp = ifp;
   10231 
   10232 	mii->mii_statchg = wm_gmii_statchg;
   10233 
   10234 	/* get PHY control from SMBus to PCIe */
   10235 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10236 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10237 	    || (sc->sc_type == WM_T_PCH_CNP))
   10238 		wm_init_phy_workarounds_pchlan(sc);
   10239 
   10240 	wm_gmii_reset(sc);
   10241 
   10242 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10243 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10244 	    wm_gmii_mediastatus);
   10245 
   10246 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10247 	    || (sc->sc_type == WM_T_82580)
   10248 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10249 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10250 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10251 			/* Attach only one port */
   10252 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10253 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10254 		} else {
   10255 			int i, id;
   10256 			uint32_t ctrl_ext;
   10257 
   10258 			id = wm_get_phy_id_82575(sc);
   10259 			if (id != -1) {
   10260 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10261 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10262 			}
   10263 			if ((id == -1)
   10264 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10265 				/* Power on the SGMII PHY if it is disabled */
   10266 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10267 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10268 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10269 				CSR_WRITE_FLUSH(sc);
   10270 				delay(300*1000); /* XXX too long */
   10271 
    10272 				/* Try PHY addresses 1 to 7 */
   10273 				for (i = 1; i < 8; i++)
   10274 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10275 					    0xffffffff, i, MII_OFFSET_ANY,
   10276 					    MIIF_DOPAUSE);
   10277 
   10278 				/* Restore previous sfp cage power state */
   10279 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10280 			}
   10281 		}
   10282 	} else
   10283 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10284 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10285 
   10286 	/*
    10287 	 * If the MAC is PCH2 or newer and failed to detect an MII PHY, call
    10288 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   10289 	 */
   10290 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10291 		|| (sc->sc_type == WM_T_PCH_SPT)
   10292 		|| (sc->sc_type == WM_T_PCH_CNP))
   10293 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10294 		wm_set_mdio_slow_mode_hv(sc);
   10295 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10296 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10297 	}
   10298 
   10299 	/*
   10300 	 * (For ICH8 variants)
   10301 	 * If PHY detection failed, use BM's r/w function and retry.
   10302 	 */
   10303 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10304 		/* if failed, retry with *_bm_* */
   10305 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10306 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10307 		    sc->sc_phytype);
   10308 		sc->sc_phytype = WMPHY_BM;
   10309 		mii->mii_readreg = wm_gmii_bm_readreg;
   10310 		mii->mii_writereg = wm_gmii_bm_writereg;
   10311 
   10312 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10313 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10314 	}
   10315 
   10316 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10317 		/* No PHY was found */
   10318 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10319 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10320 		sc->sc_phytype = WMPHY_NONE;
   10321 	} else {
   10322 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10323 
   10324 		/*
   10325 		 * PHY Found! Check PHY type again by the second call of
   10326 		 * wm_gmii_setup_phytype.
   10327 		 */
   10328 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10329 		    child->mii_mpd_model);
   10330 
   10331 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10332 	}
   10333 }
   10334 
   10335 /*
   10336  * wm_gmii_mediachange:	[ifmedia interface function]
   10337  *
   10338  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10339  */
   10340 static int
   10341 wm_gmii_mediachange(struct ifnet *ifp)
   10342 {
   10343 	struct wm_softc *sc = ifp->if_softc;
   10344 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10345 	int rc;
   10346 
   10347 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10348 		device_xname(sc->sc_dev), __func__));
   10349 	if ((ifp->if_flags & IFF_UP) == 0)
   10350 		return 0;
   10351 
   10352 	/* Disable D0 LPLU. */
   10353 	wm_lplu_d0_disable(sc);
   10354 
   10355 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10356 	sc->sc_ctrl |= CTRL_SLU;
   10357 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10358 	    || (sc->sc_type > WM_T_82543)) {
   10359 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10360 	} else {
   10361 		sc->sc_ctrl &= ~CTRL_ASDE;
   10362 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10363 		if (ife->ifm_media & IFM_FDX)
   10364 			sc->sc_ctrl |= CTRL_FD;
   10365 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10366 		case IFM_10_T:
   10367 			sc->sc_ctrl |= CTRL_SPEED_10;
   10368 			break;
   10369 		case IFM_100_TX:
   10370 			sc->sc_ctrl |= CTRL_SPEED_100;
   10371 			break;
   10372 		case IFM_1000_T:
   10373 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10374 			break;
   10375 		case IFM_NONE:
   10376 			/* There is no specific setting for IFM_NONE */
   10377 			break;
   10378 		default:
   10379 			panic("wm_gmii_mediachange: bad media 0x%x",
   10380 			    ife->ifm_media);
   10381 		}
   10382 	}
   10383 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10384 	CSR_WRITE_FLUSH(sc);
   10385 	if (sc->sc_type <= WM_T_82543)
   10386 		wm_gmii_reset(sc);
   10387 
   10388 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10389 		return 0;
   10390 	return rc;
   10391 }
   10392 
   10393 /*
   10394  * wm_gmii_mediastatus:	[ifmedia interface function]
   10395  *
   10396  *	Get the current interface media status on a 1000BASE-T device.
   10397  */
   10398 static void
   10399 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10400 {
   10401 	struct wm_softc *sc = ifp->if_softc;
   10402 
   10403 	ether_mediastatus(ifp, ifmr);
   10404 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10405 	    | sc->sc_flowflags;
   10406 }
   10407 
   10408 #define	MDI_IO		CTRL_SWDPIN(2)
   10409 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10410 #define	MDI_CLK		CTRL_SWDPIN(3)
   10411 
   10412 static void
   10413 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10414 {
   10415 	uint32_t i, v;
   10416 
   10417 	v = CSR_READ(sc, WMREG_CTRL);
   10418 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10419 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10420 
   10421 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10422 		if (data & i)
   10423 			v |= MDI_IO;
   10424 		else
   10425 			v &= ~MDI_IO;
   10426 		CSR_WRITE(sc, WMREG_CTRL, v);
   10427 		CSR_WRITE_FLUSH(sc);
   10428 		delay(10);
   10429 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10430 		CSR_WRITE_FLUSH(sc);
   10431 		delay(10);
   10432 		CSR_WRITE(sc, WMREG_CTRL, v);
   10433 		CSR_WRITE_FLUSH(sc);
   10434 		delay(10);
   10435 	}
   10436 }
   10437 
   10438 static uint16_t
   10439 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10440 {
   10441 	uint32_t v, i;
   10442 	uint16_t data = 0;
   10443 
   10444 	v = CSR_READ(sc, WMREG_CTRL);
   10445 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10446 	v |= CTRL_SWDPIO(3);
   10447 
   10448 	CSR_WRITE(sc, WMREG_CTRL, v);
   10449 	CSR_WRITE_FLUSH(sc);
   10450 	delay(10);
   10451 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10452 	CSR_WRITE_FLUSH(sc);
   10453 	delay(10);
   10454 	CSR_WRITE(sc, WMREG_CTRL, v);
   10455 	CSR_WRITE_FLUSH(sc);
   10456 	delay(10);
   10457 
   10458 	for (i = 0; i < 16; i++) {
   10459 		data <<= 1;
   10460 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10461 		CSR_WRITE_FLUSH(sc);
   10462 		delay(10);
   10463 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10464 			data |= 1;
   10465 		CSR_WRITE(sc, WMREG_CTRL, v);
   10466 		CSR_WRITE_FLUSH(sc);
   10467 		delay(10);
   10468 	}
   10469 
   10470 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10471 	CSR_WRITE_FLUSH(sc);
   10472 	delay(10);
   10473 	CSR_WRITE(sc, WMREG_CTRL, v);
   10474 	CSR_WRITE_FLUSH(sc);
   10475 	delay(10);
   10476 
   10477 	return data;
   10478 }
   10479 
   10480 #undef MDI_IO
   10481 #undef MDI_DIR
   10482 #undef MDI_CLK
   10483 
   10484 /*
   10485  * wm_gmii_i82543_readreg:	[mii interface function]
   10486  *
   10487  *	Read a PHY register on the GMII (i82543 version).
   10488  */
   10489 static int
   10490 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10491 {
   10492 	struct wm_softc *sc = device_private(dev);
   10493 
   10494 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10495 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10496 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10497 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10498 
   10499 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10500 		device_xname(dev), phy, reg, *val));
   10501 
   10502 	return 0;
   10503 }
   10504 
   10505 /*
   10506  * wm_gmii_i82543_writereg:	[mii interface function]
   10507  *
   10508  *	Write a PHY register on the GMII (i82543 version).
   10509  */
   10510 static int
   10511 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10512 {
   10513 	struct wm_softc *sc = device_private(dev);
   10514 
   10515 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10516 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10517 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10518 	    (MII_COMMAND_START << 30), 32);
   10519 
   10520 	return 0;
   10521 }
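
/*
 * Illustrative note: the bit-banged sequences above form IEEE 802.3
 * clause 22 MDIO frames.  A read sends 14 bits (start, opcode, PHY
 * address, register address) and then samples 16 data bits driven by the
 * PHY; a write sends the full 32-bit frame including the turnaround and
 * data.  A sketch (not compiled) of reading register 1 (BMSR) of the PHY
 * at address 0:
 */
#if 0
	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);	/* preamble */
	wm_i82543_mii_sendbits(sc, 1 | (0 << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	bmsr = wm_i82543_mii_recvbits(sc);
#endif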
   10522 
   10523 /*
   10524  * wm_gmii_mdic_readreg:	[mii interface function]
   10525  *
   10526  *	Read a PHY register on the GMII.
   10527  */
   10528 static int
   10529 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10530 {
   10531 	struct wm_softc *sc = device_private(dev);
   10532 	uint32_t mdic = 0;
   10533 	int i;
   10534 
   10535 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10536 	    && (reg > MII_ADDRMASK)) {
   10537 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10538 		    __func__, sc->sc_phytype, reg);
   10539 		reg &= MII_ADDRMASK;
   10540 	}
   10541 
   10542 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10543 	    MDIC_REGADD(reg));
   10544 
   10545 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10546 		delay(50);
   10547 		mdic = CSR_READ(sc, WMREG_MDIC);
   10548 		if (mdic & MDIC_READY)
   10549 			break;
   10550 	}
   10551 
   10552 	if ((mdic & MDIC_READY) == 0) {
   10553 		DPRINTF(WM_DEBUG_GMII,
   10554 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10555 			device_xname(dev), phy, reg));
   10556 		return ETIMEDOUT;
   10557 	} else if (mdic & MDIC_E) {
   10558 		/* This is normal if no PHY is present. */
   10559 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10560 			device_xname(sc->sc_dev), phy, reg));
   10561 		return -1;
   10562 	} else
   10563 		*val = MDIC_DATA(mdic);
   10564 
   10565 	/*
   10566 	 * Allow some time after each MDIC transaction to avoid
   10567 	 * reading duplicate data in the next MDIC transaction.
   10568 	 */
   10569 	if (sc->sc_type == WM_T_PCH2)
   10570 		delay(100);
   10571 
   10572 	return 0;
   10573 }
   10574 
   10575 /*
   10576  * wm_gmii_mdic_writereg:	[mii interface function]
   10577  *
   10578  *	Write a PHY register on the GMII.
   10579  */
   10580 static int
   10581 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10582 {
   10583 	struct wm_softc *sc = device_private(dev);
   10584 	uint32_t mdic = 0;
   10585 	int i;
   10586 
   10587 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10588 	    && (reg > MII_ADDRMASK)) {
   10589 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10590 		    __func__, sc->sc_phytype, reg);
   10591 		reg &= MII_ADDRMASK;
   10592 	}
   10593 
   10594 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10595 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10596 
   10597 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10598 		delay(50);
   10599 		mdic = CSR_READ(sc, WMREG_MDIC);
   10600 		if (mdic & MDIC_READY)
   10601 			break;
   10602 	}
   10603 
   10604 	if ((mdic & MDIC_READY) == 0) {
   10605 		DPRINTF(WM_DEBUG_GMII,
   10606 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10607 			device_xname(dev), phy, reg));
   10608 		return ETIMEDOUT;
   10609 	} else if (mdic & MDIC_E) {
   10610 		DPRINTF(WM_DEBUG_GMII,
   10611 		    ("%s: MDIC write error: phy %d reg %d\n",
   10612 			device_xname(dev), phy, reg));
   10613 		return -1;
   10614 	}
   10615 
   10616 	/*
   10617 	 * Allow some time after each MDIC transaction to avoid
   10618 	 * reading duplicate data in the next MDIC transaction.
   10619 	 */
   10620 	if (sc->sc_type == WM_T_PCH2)
   10621 		delay(100);
   10622 
   10623 	return 0;
   10624 }
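
/*
 * Illustrative sketch (not compiled): one MDIC transaction as issued by
 * the two helpers above.  The command word packs the opcode, PHY address,
 * register address and (for a write) the data; completion is polled via
 * MDIC_READY and an error is flagged in MDIC_E:
 */
#if 0
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));
	do {
		delay(50);
		mdic = CSR_READ(sc, WMREG_MDIC);
	} while ((mdic & MDIC_READY) == 0);	/* with a timeout in practice */
	if (mdic & MDIC_E)
		rv = -1;			/* the PHY did not respond */
#endif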
   10625 
   10626 /*
   10627  * wm_gmii_i82544_readreg:	[mii interface function]
   10628  *
   10629  *	Read a PHY register on the GMII.
   10630  */
   10631 static int
   10632 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10633 {
   10634 	struct wm_softc *sc = device_private(dev);
   10635 	int rv;
   10636 
   10637 	if (sc->phy.acquire(sc)) {
   10638 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10639 		return -1;
   10640 	}
   10641 
   10642 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10643 
   10644 	sc->phy.release(sc);
   10645 
   10646 	return rv;
   10647 }
   10648 
   10649 static int
   10650 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10651 {
   10652 	struct wm_softc *sc = device_private(dev);
   10653 	int rv;
   10654 
   10655 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10656 		switch (sc->sc_phytype) {
   10657 		case WMPHY_IGP:
   10658 		case WMPHY_IGP_2:
   10659 		case WMPHY_IGP_3:
   10660 			rv = wm_gmii_mdic_writereg(dev, phy,
   10661 			    MII_IGPHY_PAGE_SELECT, reg);
   10662 			if (rv != 0)
   10663 				return rv;
   10664 			break;
   10665 		default:
   10666 #ifdef WM_DEBUG
   10667 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10668 			    __func__, sc->sc_phytype, reg);
   10669 #endif
   10670 			break;
   10671 		}
   10672 	}
   10673 
   10674 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10675 }
   10676 
   10677 /*
   10678  * wm_gmii_i82544_writereg:	[mii interface function]
   10679  *
   10680  *	Write a PHY register on the GMII.
   10681  */
   10682 static int
   10683 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10684 {
   10685 	struct wm_softc *sc = device_private(dev);
   10686 	int rv;
   10687 
   10688 	if (sc->phy.acquire(sc)) {
   10689 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10690 		return -1;
   10691 	}
   10692 
   10693 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10694 	sc->phy.release(sc);
   10695 
   10696 	return rv;
   10697 }
   10698 
   10699 static int
   10700 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10701 {
   10702 	struct wm_softc *sc = device_private(dev);
   10703 	int rv;
   10704 
   10705 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10706 		switch (sc->sc_phytype) {
   10707 		case WMPHY_IGP:
   10708 		case WMPHY_IGP_2:
   10709 		case WMPHY_IGP_3:
   10710 			rv = wm_gmii_mdic_writereg(dev, phy,
   10711 			    MII_IGPHY_PAGE_SELECT, reg);
   10712 			if (rv != 0)
   10713 				return rv;
   10714 			break;
   10715 		default:
   10716 #ifdef WM_DEBUG
    10717 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10718 			    __func__, sc->sc_phytype, reg);
   10719 #endif
   10720 			break;
   10721 		}
   10722 	}
   10723 
   10724 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10725 }
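
/*
 * Illustrative sketch (not compiled): for IGP PHYs, a register argument
 * above BME1000_MAX_MULTI_PAGE_REG encodes a page, and the *_locked
 * helpers above turn one such access into two MDIC operations.  For a
 * hypothetical page-encoded address 0x1234 a read becomes:
 */
#if 0
	wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, 0x1234);
	wm_gmii_mdic_readreg(dev, phy, 0x1234 & MII_ADDRMASK, &val);
#endif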
   10726 
   10727 /*
   10728  * wm_gmii_i80003_readreg:	[mii interface function]
   10729  *
    10730  *	Read a PHY register on the Kumeran bus (80003 devices).
    10731  * This could be handled by the PHY layer if we didn't have to lock the
    10732  * resource ...
   10733  */
   10734 static int
   10735 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10736 {
   10737 	struct wm_softc *sc = device_private(dev);
   10738 	int page_select;
   10739 	uint16_t temp, temp2;
   10740 	int rv = 0;
   10741 
   10742 	if (phy != 1) /* Only one PHY on kumeran bus */
   10743 		return -1;
   10744 
   10745 	if (sc->phy.acquire(sc)) {
   10746 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10747 		return -1;
   10748 	}
   10749 
   10750 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10751 		page_select = GG82563_PHY_PAGE_SELECT;
   10752 	else {
   10753 		/*
   10754 		 * Use Alternative Page Select register to access registers
   10755 		 * 30 and 31.
   10756 		 */
   10757 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10758 	}
   10759 	temp = reg >> GG82563_PAGE_SHIFT;
   10760 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10761 		goto out;
   10762 
   10763 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10764 		/*
    10765 		 * Wait another 200us to work around a bug in the ready bit
    10766 		 * of the MDIC register.
   10767 		 */
   10768 		delay(200);
   10769 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10770 		if ((rv != 0) || (temp2 != temp)) {
   10771 			device_printf(dev, "%s failed\n", __func__);
   10772 			rv = -1;
   10773 			goto out;
   10774 		}
   10775 		delay(200);
   10776 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10777 		delay(200);
   10778 	} else
   10779 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10780 
   10781 out:
   10782 	sc->phy.release(sc);
   10783 	return rv;
   10784 }
   10785 
   10786 /*
   10787  * wm_gmii_i80003_writereg:	[mii interface function]
   10788  *
    10789  *	Write a PHY register on the Kumeran bus (80003 devices).
    10790  * This could be handled by the PHY layer if we didn't have to lock the
    10791  * resource ...
   10792  */
   10793 static int
   10794 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10795 {
   10796 	struct wm_softc *sc = device_private(dev);
   10797 	int page_select, rv;
   10798 	uint16_t temp, temp2;
   10799 
   10800 	if (phy != 1) /* Only one PHY on kumeran bus */
   10801 		return -1;
   10802 
   10803 	if (sc->phy.acquire(sc)) {
   10804 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10805 		return -1;
   10806 	}
   10807 
   10808 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10809 		page_select = GG82563_PHY_PAGE_SELECT;
   10810 	else {
   10811 		/*
   10812 		 * Use Alternative Page Select register to access registers
   10813 		 * 30 and 31.
   10814 		 */
   10815 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10816 	}
   10817 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10818 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10819 		goto out;
   10820 
   10821 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10822 		/*
    10823 		 * Wait another 200us to work around a bug in the ready bit
    10824 		 * of the MDIC register.
   10825 		 */
   10826 		delay(200);
   10827 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10828 		if ((rv != 0) || (temp2 != temp)) {
   10829 			device_printf(dev, "%s failed\n", __func__);
   10830 			rv = -1;
   10831 			goto out;
   10832 		}
   10833 		delay(200);
   10834 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10835 		delay(200);
   10836 	} else
   10837 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10838 
   10839 out:
   10840 	sc->phy.release(sc);
   10841 	return rv;
   10842 }
   10843 
   10844 /*
   10845  * wm_gmii_bm_readreg:	[mii interface function]
   10846  *
    10847  *	Read a PHY register on a BM (82567-family) PHY.
    10848  * This could be handled by the PHY layer if we didn't have to lock the
    10849  * resource ...
   10850  */
   10851 static int
   10852 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10853 {
   10854 	struct wm_softc *sc = device_private(dev);
   10855 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10856 	int rv;
   10857 
   10858 	if (sc->phy.acquire(sc)) {
   10859 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10860 		return -1;
   10861 	}
   10862 
   10863 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10864 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10865 		    || (reg == 31)) ? 1 : phy;
   10866 	/* Page 800 works differently than the rest so it has its own func */
   10867 	if (page == BM_WUC_PAGE) {
   10868 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10869 		goto release;
   10870 	}
   10871 
   10872 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10873 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10874 		    && (sc->sc_type != WM_T_82583))
   10875 			rv = wm_gmii_mdic_writereg(dev, phy,
   10876 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10877 		else
   10878 			rv = wm_gmii_mdic_writereg(dev, phy,
   10879 			    BME1000_PHY_PAGE_SELECT, page);
   10880 		if (rv != 0)
   10881 			goto release;
   10882 	}
   10883 
   10884 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10885 
   10886 release:
   10887 	sc->phy.release(sc);
   10888 	return rv;
   10889 }
   10890 
   10891 /*
   10892  * wm_gmii_bm_writereg:	[mii interface function]
   10893  *
    10894  *	Write a PHY register on a BM (82567-family) PHY.
    10895  * This could be handled by the PHY layer if we didn't have to lock the
    10896  * resource ...
   10897  */
   10898 static int
   10899 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10900 {
   10901 	struct wm_softc *sc = device_private(dev);
   10902 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10903 	int rv;
   10904 
   10905 	if (sc->phy.acquire(sc)) {
   10906 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10907 		return -1;
   10908 	}
   10909 
   10910 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10911 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10912 		    || (reg == 31)) ? 1 : phy;
   10913 	/* Page 800 works differently than the rest so it has its own func */
   10914 	if (page == BM_WUC_PAGE) {
   10915 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10916 		goto release;
   10917 	}
   10918 
   10919 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10920 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10921 		    && (sc->sc_type != WM_T_82583))
   10922 			rv = wm_gmii_mdic_writereg(dev, phy,
   10923 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10924 		else
   10925 			rv = wm_gmii_mdic_writereg(dev, phy,
   10926 			    BME1000_PHY_PAGE_SELECT, page);
   10927 		if (rv != 0)
   10928 			goto release;
   10929 	}
   10930 
   10931 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10932 
   10933 release:
   10934 	sc->phy.release(sc);
   10935 	return rv;
   10936 }
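
/*
 * Illustrative sketch (not compiled): BM register arguments carry the
 * page number above BME1000_PAGE_SHIFT and the in-page offset below it.
 * E.g. port-control register 17 on page 769 (BM_PORT_CTRL_PAGE) could be
 * addressed as:
 */
#if 0
	reg = (BM_PORT_CTRL_PAGE << BME1000_PAGE_SHIFT) | 17;
	rv = wm_gmii_bm_readreg(dev, phy, reg, &val);
#endif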
   10937 
   10938 /*
   10939  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10940  *  @dev: pointer to the HW structure
    10941  *  @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
    10942  *
    10943  *  Assumes semaphore already acquired and phy_regp points to a valid memory
   10944  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10945  */
   10946 static int
   10947 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10948 {
   10949 	uint16_t temp;
   10950 	int rv;
   10951 
   10952 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10953 		device_xname(dev), __func__));
   10954 
   10955 	if (!phy_regp)
   10956 		return -1;
   10957 
   10958 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10959 
   10960 	/* Select Port Control Registers page */
   10961 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10962 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10963 	if (rv != 0)
   10964 		return rv;
   10965 
   10966 	/* Read WUCE and save it */
   10967 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10968 	if (rv != 0)
   10969 		return rv;
   10970 
   10971 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10972 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10973 	 */
   10974 	temp = *phy_regp;
   10975 	temp |= BM_WUC_ENABLE_BIT;
   10976 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10977 
   10978 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10979 		return rv;
   10980 
   10981 	/* Select Host Wakeup Registers page - caller now able to write
   10982 	 * registers on the Wakeup registers page
   10983 	 */
   10984 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10985 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10986 }
   10987 
   10988 /*
   10989  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10990  *  @dev: pointer to the HW structure
    10991  *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
   10992  *
   10993  *  Restore BM_WUC_ENABLE_REG to its original value.
   10994  *
    10995  *  Assumes semaphore already acquired and *phy_regp is the contents of the
   10996  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10997  *  caller.
   10998  */
   10999 static int
   11000 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11001 {
   11002 
   11003 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11004 		device_xname(dev), __func__));
   11005 
   11006 	if (!phy_regp)
   11007 		return -1;
   11008 
   11009 	/* Select Port Control Registers page */
   11010 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11011 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11012 
   11013 	/* Restore 769.17 to its original value */
   11014 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11015 
   11016 	return 0;
   11017 }
   11018 
   11019 /*
   11020  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11021  *  @sc: pointer to the HW structure
   11022  *  @offset: register offset to be read or written
   11023  *  @val: pointer to the data to read or write
   11024  *  @rd: determines if operation is read or write
   11025  *  @page_set: BM_WUC_PAGE already set and access enabled
   11026  *
   11027  *  Read the PHY register at offset and store the retrieved information in
   11028  *  data, or write data to PHY register at offset.  Note the procedure to
   11029  *  access the PHY wakeup registers is different than reading the other PHY
   11030  *  registers. It works as such:
   11031  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11032  *  2) Set page to 800 for host (801 if we were the manageability engine)
   11033  *  3) Write the address using the address opcode (0x11)
   11034  *  4) Read or write the data using the data opcode (0x12)
   11035  *  5) Restore 769.17.2 to its original value
   11036  *
   11037  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11038  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11039  *
   11040  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11041  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11042  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11043  */
   11044 static int
    11045 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11046 	bool page_set)
   11047 {
   11048 	struct wm_softc *sc = device_private(dev);
   11049 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11050 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11051 	uint16_t wuce;
   11052 	int rv = 0;
   11053 
   11054 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11055 		device_xname(dev), __func__));
   11056 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11057 	if ((sc->sc_type == WM_T_PCH)
   11058 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11059 		device_printf(dev,
   11060 		    "Attempting to access page %d while gig enabled.\n", page);
   11061 	}
   11062 
   11063 	if (!page_set) {
   11064 		/* Enable access to PHY wakeup registers */
   11065 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11066 		if (rv != 0) {
   11067 			device_printf(dev,
   11068 			    "%s: Could not enable PHY wakeup reg access\n",
   11069 			    __func__);
   11070 			return rv;
   11071 		}
   11072 	}
   11073 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11074 		device_xname(sc->sc_dev), __func__, page, regnum));
   11075 
   11076 	/*
    11077 	 * Steps 3) and 4): access the PHY wakeup register.
    11078 	 * See the function comment above.
   11079 	 */
   11080 
   11081 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11082 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11083 	if (rv != 0)
   11084 		return rv;
   11085 
   11086 	if (rd) {
   11087 		/* Read the Wakeup register page value using opcode 0x12 */
   11088 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11089 	} else {
   11090 		/* Write the Wakeup register page value using opcode 0x12 */
   11091 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11092 	}
   11093 	if (rv != 0)
   11094 		return rv;
   11095 
   11096 	if (!page_set)
   11097 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11098 
   11099 	return rv;
   11100 }
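
/*
 * Illustrative sketch (not compiled): with the semaphore already held and
 * page_set == false, a wakeup-register read performed by the function
 * above expands to the five documented steps:
 */
#if 0
	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);		/* steps 1-2 */
	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum); /* 3 */
	wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, &data);      /* 4 */
	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);	/* step 5 */
#endif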
   11101 
   11102 /*
   11103  * wm_gmii_hv_readreg:	[mii interface function]
   11104  *
    11105  *	Read a PHY register on an HV (PCH-family) PHY.
    11106  * This could be handled by the PHY layer if we didn't have to lock the
    11107  * resource ...
   11108  */
   11109 static int
   11110 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11111 {
   11112 	struct wm_softc *sc = device_private(dev);
   11113 	int rv;
   11114 
   11115 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11116 		device_xname(dev), __func__));
   11117 	if (sc->phy.acquire(sc)) {
   11118 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11119 		return -1;
   11120 	}
   11121 
   11122 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11123 	sc->phy.release(sc);
   11124 	return rv;
   11125 }
   11126 
   11127 static int
   11128 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11129 {
   11130 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11131 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11132 	int rv;
   11133 
   11134 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11135 
   11136 	/* Page 800 works differently than the rest so it has its own func */
   11137 	if (page == BM_WUC_PAGE)
   11138 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11139 
   11140 	/*
    11141 	 * Pages lower than 768 work differently than the rest and are not
    11142 	 * handled here.
   11143 	 */
   11144 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11145 		device_printf(dev, "%s: page %d not supported\n",
          		    __func__, page);
   11146 		return -1;
   11147 	}
   11148 
   11149 	/*
   11150 	 * XXX I21[789] documents say that the SMBus Address register is at
   11151 	 * PHY address 01, Page 0 (not 768), Register 26.
   11152 	 */
   11153 	if (page == HV_INTC_FC_PAGE_START)
   11154 		page = 0;
   11155 
   11156 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11157 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11158 		    page << BME1000_PAGE_SHIFT);
   11159 		if (rv != 0)
   11160 			return rv;
   11161 	}
   11162 
   11163 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11164 }
   11165 
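          /*
           * For illustration: the "reg" argument packs a page and a
           * register number.  Assuming a BM_PHY_REG() helper that composes
           * what BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() decompose, reading
           * page 800 (BM_WUC_PAGE) register 1 would look like:
           *
           *	uint16_t data;
           *
           *	wm_gmii_hv_readreg_locked(dev, 2,
           *	    BM_PHY_REG(BM_WUC_PAGE, 1), &data);
           */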
   11166 /*
   11167  * wm_gmii_hv_writereg:	[mii interface function]
   11168  *
    11169  *	Write a PHY register on the Kumeran interface.
    11170  * This could be handled by the PHY layer if we didn't have to lock the
    11171  * resource ...
   11172  */
   11173 static int
   11174 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11175 {
   11176 	struct wm_softc *sc = device_private(dev);
   11177 	int rv;
   11178 
   11179 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11180 		device_xname(dev), __func__));
   11181 
   11182 	if (sc->phy.acquire(sc)) {
   11183 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11184 		return -1;
   11185 	}
   11186 
   11187 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11188 	sc->phy.release(sc);
   11189 
   11190 	return rv;
   11191 }
   11192 
   11193 static int
   11194 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11195 {
   11196 	struct wm_softc *sc = device_private(dev);
   11197 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11198 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11199 	int rv;
   11200 
   11201 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11202 
   11203 	/* Page 800 works differently than the rest so it has its own func */
   11204 	if (page == BM_WUC_PAGE)
   11205 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11206 		    false);
   11207 
   11208 	/*
    11209 	 * Pages lower than 768 work differently than the rest and are not
    11210 	 * handled here.
   11211 	 */
   11212 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11213 		device_printf(dev, "%s: page %d not supported\n",
          		    __func__, page);
   11214 		return -1;
   11215 	}
   11216 
   11217 	{
   11218 		/*
   11219 		 * XXX I21[789] documents say that the SMBus Address register
   11220 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11221 		 */
   11222 		if (page == HV_INTC_FC_PAGE_START)
   11223 			page = 0;
   11224 
   11225 		/*
   11226 		 * XXX Workaround MDIO accesses being disabled after entering
   11227 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11228 		 * register is set)
   11229 		 */
   11230 		if (sc->sc_phytype == WMPHY_82578) {
   11231 			struct mii_softc *child;
   11232 
   11233 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11234 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11235 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11236 			    && ((val & (1 << 11)) != 0)) {
   11237 				device_printf(dev, "XXX need workaround\n");
   11238 			}
   11239 		}
   11240 
   11241 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11242 			rv = wm_gmii_mdic_writereg(dev, 1,
   11243 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11244 			if (rv != 0)
   11245 				return rv;
   11246 		}
   11247 	}
   11248 
   11249 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11250 }
   11251 
   11252 /*
   11253  * wm_gmii_82580_readreg:	[mii interface function]
   11254  *
   11255  *	Read a PHY register on the 82580 and I350.
   11256  * This could be handled by the PHY layer if we didn't have to lock the
    11257  * resource ...
   11258  */
   11259 static int
   11260 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11261 {
   11262 	struct wm_softc *sc = device_private(dev);
   11263 	int rv;
   11264 
   11265 	if (sc->phy.acquire(sc) != 0) {
   11266 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11267 		return -1;
   11268 	}
   11269 
   11270 #ifdef DIAGNOSTIC
   11271 	if (reg > MII_ADDRMASK) {
   11272 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11273 		    __func__, sc->sc_phytype, reg);
   11274 		reg &= MII_ADDRMASK;
   11275 	}
   11276 #endif
   11277 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11278 
   11279 	sc->phy.release(sc);
   11280 	return rv;
   11281 }
   11282 
   11283 /*
   11284  * wm_gmii_82580_writereg:	[mii interface function]
   11285  *
   11286  *	Write a PHY register on the 82580 and I350.
   11287  * This could be handled by the PHY layer if we didn't have to lock the
    11288  * resource ...
   11289  */
   11290 static int
   11291 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11292 {
   11293 	struct wm_softc *sc = device_private(dev);
   11294 	int rv;
   11295 
   11296 	if (sc->phy.acquire(sc) != 0) {
   11297 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11298 		return -1;
   11299 	}
   11300 
   11301 #ifdef DIAGNOSTIC
   11302 	if (reg > MII_ADDRMASK) {
   11303 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11304 		    __func__, sc->sc_phytype, reg);
   11305 		reg &= MII_ADDRMASK;
   11306 	}
   11307 #endif
   11308 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11309 
   11310 	sc->phy.release(sc);
   11311 	return rv;
   11312 }
   11313 
   11314 /*
   11315  * wm_gmii_gs40g_readreg:	[mii interface function]
   11316  *
    11317  *	Read a PHY register on the I210 and I211.
    11318  * This could be handled by the PHY layer if we didn't have to lock the
    11319  * resource ...
   11320  */
   11321 static int
   11322 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11323 {
   11324 	struct wm_softc *sc = device_private(dev);
   11325 	int page, offset;
   11326 	int rv;
   11327 
   11328 	/* Acquire semaphore */
   11329 	if (sc->phy.acquire(sc)) {
   11330 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11331 		return -1;
   11332 	}
   11333 
   11334 	/* Page select */
   11335 	page = reg >> GS40G_PAGE_SHIFT;
   11336 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11337 	if (rv != 0)
   11338 		goto release;
   11339 
   11340 	/* Read reg */
   11341 	offset = reg & GS40G_OFFSET_MASK;
   11342 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11343 
   11344 release:
   11345 	sc->phy.release(sc);
   11346 	return rv;
   11347 }
   11348 
   11349 /*
   11350  * wm_gmii_gs40g_writereg:	[mii interface function]
   11351  *
   11352  *	Write a PHY register on the I210 and I211.
   11353  * This could be handled by the PHY layer if we didn't have to lock the
    11354  * resource ...
   11355  */
   11356 static int
   11357 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11358 {
   11359 	struct wm_softc *sc = device_private(dev);
   11360 	uint16_t page;
   11361 	int offset, rv;
   11362 
   11363 	/* Acquire semaphore */
   11364 	if (sc->phy.acquire(sc)) {
   11365 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11366 		return -1;
   11367 	}
   11368 
   11369 	/* Page select */
   11370 	page = reg >> GS40G_PAGE_SHIFT;
   11371 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11372 	if (rv != 0)
   11373 		goto release;
   11374 
   11375 	/* Write reg */
   11376 	offset = reg & GS40G_OFFSET_MASK;
   11377 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11378 
   11379 release:
   11380 	/* Release semaphore */
   11381 	sc->phy.release(sc);
   11382 	return rv;
   11383 }
   11384 
   11385 /*
   11386  * wm_gmii_statchg:	[mii interface function]
   11387  *
   11388  *	Callback from MII layer when media changes.
   11389  */
   11390 static void
   11391 wm_gmii_statchg(struct ifnet *ifp)
   11392 {
   11393 	struct wm_softc *sc = ifp->if_softc;
   11394 	struct mii_data *mii = &sc->sc_mii;
   11395 
   11396 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11397 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11398 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11399 
   11400 	/* Get flow control negotiation result. */
   11401 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11402 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11403 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11404 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11405 	}
   11406 
   11407 	if (sc->sc_flowflags & IFM_FLOW) {
   11408 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11409 			sc->sc_ctrl |= CTRL_TFCE;
   11410 			sc->sc_fcrtl |= FCRTL_XONE;
   11411 		}
   11412 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11413 			sc->sc_ctrl |= CTRL_RFCE;
   11414 	}
   11415 
   11416 	if (mii->mii_media_active & IFM_FDX) {
   11417 		DPRINTF(WM_DEBUG_LINK,
   11418 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11419 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11420 	} else {
   11421 		DPRINTF(WM_DEBUG_LINK,
   11422 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11423 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11424 	}
   11425 
   11426 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11427 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11428 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11429 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11430 	if (sc->sc_type == WM_T_80003) {
   11431 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11432 		case IFM_1000_T:
   11433 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11434 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11435 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11436 			break;
   11437 		default:
   11438 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11439 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11440 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11441 			break;
   11442 		}
   11443 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11444 	}
   11445 }
   11446 
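          /*
           * Example of the mapping above: if autonegotiation resolved to
           * symmetric pause (IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE),
           * the function sets CTRL_TFCE, CTRL_RFCE and FCRTL_XONE before
           * writing CTRL, TCTL and FCRTL back to the chip.
           */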
    11447 /* Kumeran related (80003, ICH* and PCH*) */
   11448 
   11449 /*
   11450  * wm_kmrn_readreg:
   11451  *
    11452  *	Read a Kumeran register.
   11453  */
   11454 static int
   11455 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11456 {
   11457 	int rv;
   11458 
   11459 	if (sc->sc_type == WM_T_80003)
   11460 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11461 	else
   11462 		rv = sc->phy.acquire(sc);
   11463 	if (rv != 0) {
   11464 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11465 		    __func__);
   11466 		return rv;
   11467 	}
   11468 
   11469 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11470 
   11471 	if (sc->sc_type == WM_T_80003)
   11472 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11473 	else
   11474 		sc->phy.release(sc);
   11475 
   11476 	return rv;
   11477 }
   11478 
   11479 static int
   11480 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11481 {
   11482 
   11483 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11484 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11485 	    KUMCTRLSTA_REN);
   11486 	CSR_WRITE_FLUSH(sc);
   11487 	delay(2);
   11488 
   11489 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11490 
   11491 	return 0;
   11492 }
   11493 
   11494 /*
   11495  * wm_kmrn_writereg:
   11496  *
    11497  *	Write a Kumeran register.
   11498  */
   11499 static int
   11500 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11501 {
   11502 	int rv;
   11503 
   11504 	if (sc->sc_type == WM_T_80003)
   11505 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11506 	else
   11507 		rv = sc->phy.acquire(sc);
   11508 	if (rv != 0) {
   11509 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11510 		    __func__);
   11511 		return rv;
   11512 	}
   11513 
   11514 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11515 
   11516 	if (sc->sc_type == WM_T_80003)
   11517 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11518 	else
   11519 		sc->phy.release(sc);
   11520 
   11521 	return rv;
   11522 }
   11523 
   11524 static int
   11525 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11526 {
   11527 
   11528 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11529 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11530 
   11531 	return 0;
   11532 }
   11533 
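          /*
           * Worked example of the KUMCTRLSTA protocol used by the helpers
           * above (illustration only): to read Kumeran offset 0x2, write
           *
           *	((0x2 << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET)
           *	    | KUMCTRLSTA_REN
           *
           * to WMREG_KUMCTRLSTA, wait 2us, then read the data back from
           * the low 16 bits (KUMCTRLSTA_MASK).  A write ORs the 16-bit
           * value into the low bits in place of KUMCTRLSTA_REN.
           */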
   11534 /*
   11535  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11536  * This access method is different from IEEE MMD.
   11537  */
   11538 static int
   11539 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11540 {
   11541 	struct wm_softc *sc = device_private(dev);
   11542 	int rv;
   11543 
   11544 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11545 	if (rv != 0)
   11546 		return rv;
   11547 
   11548 	if (rd)
   11549 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11550 	else
   11551 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11552 	return rv;
   11553 }
   11554 
   11555 static int
   11556 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11557 {
   11558 
   11559 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11560 }
   11561 
   11562 static int
   11563 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11564 {
   11565 
   11566 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11567 }
   11568 
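          /*
           * Usage sketch for the EMI helpers (illustration only; "reg" is
           * any EMI register offset): with the PHY semaphore held,
           *
           *	uint16_t data;
           *
           *	rv = wm_read_emi_reg_locked(dev, reg, &data);
           *
           * latches the address through I82579_EMI_ADDR and then moves the
           * data through I82579_EMI_DATA, which is why this cannot use the
           * generic IEEE MMD path.
           */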
   11569 /* SGMII related */
   11570 
   11571 /*
   11572  * wm_sgmii_uses_mdio
   11573  *
   11574  * Check whether the transaction is to the internal PHY or the external
   11575  * MDIO interface. Return true if it's MDIO.
   11576  */
   11577 static bool
   11578 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11579 {
   11580 	uint32_t reg;
   11581 	bool ismdio = false;
   11582 
   11583 	switch (sc->sc_type) {
   11584 	case WM_T_82575:
   11585 	case WM_T_82576:
   11586 		reg = CSR_READ(sc, WMREG_MDIC);
   11587 		ismdio = ((reg & MDIC_DEST) != 0);
   11588 		break;
   11589 	case WM_T_82580:
   11590 	case WM_T_I350:
   11591 	case WM_T_I354:
   11592 	case WM_T_I210:
   11593 	case WM_T_I211:
   11594 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11595 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11596 		break;
   11597 	default:
   11598 		break;
   11599 	}
   11600 
   11601 	return ismdio;
   11602 }
   11603 
   11604 /*
   11605  * wm_sgmii_readreg:	[mii interface function]
   11606  *
    11607  *	Read a PHY register on the SGMII.
    11608  * This could be handled by the PHY layer if we didn't have to lock the
    11609  * resource ...
   11610  */
   11611 static int
   11612 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11613 {
   11614 	struct wm_softc *sc = device_private(dev);
   11615 	int rv;
   11616 
   11617 	if (sc->phy.acquire(sc)) {
   11618 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11619 		return -1;
   11620 	}
   11621 
   11622 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11623 
   11624 	sc->phy.release(sc);
   11625 	return rv;
   11626 }
   11627 
   11628 static int
   11629 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11630 {
   11631 	struct wm_softc *sc = device_private(dev);
   11632 	uint32_t i2ccmd;
   11633 	int i, rv = 0;
   11634 
   11635 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11636 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11637 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11638 
   11639 	/* Poll the ready bit */
   11640 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11641 		delay(50);
   11642 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11643 		if (i2ccmd & I2CCMD_READY)
   11644 			break;
   11645 	}
   11646 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11647 		device_printf(dev, "I2CCMD Read did not complete\n");
   11648 		rv = ETIMEDOUT;
   11649 	}
   11650 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11651 		device_printf(dev, "I2CCMD Error bit set\n");
   11652 		rv = EIO;
   11653 	}
   11654 
   11655 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11656 
   11657 	return rv;
   11658 }
   11659 
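          /*
           * Note on the byte swap above: the I2C interface returns the two
           * data bytes in the opposite order from MII, so a raw I2CCMD
           * data field of 0x3412 yields *val == 0x1234.  The write path
           * below applies the mirror-image swap before issuing the
           * command.
           */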
   11660 /*
   11661  * wm_sgmii_writereg:	[mii interface function]
   11662  *
   11663  *	Write a PHY register on the SGMII.
   11664  * This could be handled by the PHY layer if we didn't have to lock the
    11665  * resource ...
   11666  */
   11667 static int
   11668 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11669 {
   11670 	struct wm_softc *sc = device_private(dev);
   11671 	int rv;
   11672 
   11673 	if (sc->phy.acquire(sc) != 0) {
   11674 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11675 		return -1;
   11676 	}
   11677 
   11678 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11679 
   11680 	sc->phy.release(sc);
   11681 
   11682 	return rv;
   11683 }
   11684 
   11685 static int
   11686 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11687 {
   11688 	struct wm_softc *sc = device_private(dev);
   11689 	uint32_t i2ccmd;
   11690 	uint16_t swapdata;
   11691 	int rv = 0;
   11692 	int i;
   11693 
   11694 	/* Swap the data bytes for the I2C interface */
   11695 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11696 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11697 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11698 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11699 
   11700 	/* Poll the ready bit */
   11701 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11702 		delay(50);
   11703 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11704 		if (i2ccmd & I2CCMD_READY)
   11705 			break;
   11706 	}
   11707 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11708 		device_printf(dev, "I2CCMD Write did not complete\n");
   11709 		rv = ETIMEDOUT;
   11710 	}
   11711 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11712 		device_printf(dev, "I2CCMD Error bit set\n");
   11713 		rv = EIO;
   11714 	}
   11715 
   11716 	return rv;
   11717 }
   11718 
   11719 /* TBI related */
   11720 
   11721 static bool
   11722 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11723 {
   11724 	bool sig;
   11725 
   11726 	sig = ctrl & CTRL_SWDPIN(1);
   11727 
   11728 	/*
   11729 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11730 	 * detect a signal, 1 if they don't.
   11731 	 */
   11732 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11733 		sig = !sig;
   11734 
   11735 	return sig;
   11736 }
   11737 
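          /*
           * Example: on an 82543/82544 whose optics currently detect a
           * signal, CTRL_SWDPIN(1) reads 0 and the inversion above makes
           * this function return true; on later chips the bit is already
           * active-high.
           */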
   11738 /*
   11739  * wm_tbi_mediainit:
   11740  *
   11741  *	Initialize media for use on 1000BASE-X devices.
   11742  */
   11743 static void
   11744 wm_tbi_mediainit(struct wm_softc *sc)
   11745 {
   11746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11747 	const char *sep = "";
   11748 
   11749 	if (sc->sc_type < WM_T_82543)
   11750 		sc->sc_tipg = TIPG_WM_DFLT;
   11751 	else
   11752 		sc->sc_tipg = TIPG_LG_DFLT;
   11753 
   11754 	sc->sc_tbi_serdes_anegticks = 5;
   11755 
   11756 	/* Initialize our media structures */
   11757 	sc->sc_mii.mii_ifp = ifp;
   11758 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11759 
   11760 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11761 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11762 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11763 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11764 	else
   11765 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11766 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11767 
   11768 	/*
   11769 	 * SWD Pins:
   11770 	 *
   11771 	 *	0 = Link LED (output)
   11772 	 *	1 = Loss Of Signal (input)
   11773 	 */
   11774 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11775 
   11776 	/* XXX Perhaps this is only for TBI */
   11777 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11778 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11779 
   11780 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11781 		sc->sc_ctrl &= ~CTRL_LRST;
   11782 
   11783 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11784 
   11785 #define	ADD(ss, mm, dd)							\
   11786 do {									\
   11787 	aprint_normal("%s%s", sep, ss);					\
   11788 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11789 	sep = ", ";							\
   11790 } while (/*CONSTCOND*/0)
   11791 
   11792 	aprint_normal_dev(sc->sc_dev, "");
   11793 
   11794 	if (sc->sc_type == WM_T_I354) {
   11795 		uint32_t status;
   11796 
   11797 		status = CSR_READ(sc, WMREG_STATUS);
   11798 		if (((status & STATUS_2P5_SKU) != 0)
   11799 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    11800 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    11801 		} else
    11802 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   11803 	} else if (sc->sc_type == WM_T_82545) {
   11804 		/* Only 82545 is LX (XXX except SFP) */
   11805 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11806 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11807 	} else {
   11808 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11809 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11810 	}
   11811 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11812 	aprint_normal("\n");
   11813 
   11814 #undef ADD
   11815 
   11816 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11817 }
   11818 
   11819 /*
   11820  * wm_tbi_mediachange:	[ifmedia interface function]
   11821  *
   11822  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11823  */
   11824 static int
   11825 wm_tbi_mediachange(struct ifnet *ifp)
   11826 {
   11827 	struct wm_softc *sc = ifp->if_softc;
   11828 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11829 	uint32_t status, ctrl;
   11830 	bool signal;
   11831 	int i;
   11832 
   11833 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11834 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11835 		/* XXX need some work for >= 82571 and < 82575 */
   11836 		if (sc->sc_type < WM_T_82575)
   11837 			return 0;
   11838 	}
   11839 
   11840 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11841 	    || (sc->sc_type >= WM_T_82575))
   11842 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11843 
   11844 	sc->sc_ctrl &= ~CTRL_LRST;
   11845 	sc->sc_txcw = TXCW_ANE;
   11846 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11847 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11848 	else if (ife->ifm_media & IFM_FDX)
   11849 		sc->sc_txcw |= TXCW_FD;
   11850 	else
   11851 		sc->sc_txcw |= TXCW_HD;
   11852 
   11853 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11854 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11855 
   11856 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11857 		device_xname(sc->sc_dev), sc->sc_txcw));
   11858 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11859 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11860 	CSR_WRITE_FLUSH(sc);
   11861 	delay(1000);
   11862 
   11863 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11864 	signal = wm_tbi_havesignal(sc, ctrl);
   11865 
   11866 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11867 		signal));
   11868 
   11869 	if (signal) {
   11870 		/* Have signal; wait for the link to come up. */
   11871 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11872 			delay(10000);
   11873 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11874 				break;
   11875 		}
   11876 
   11877 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11878 			device_xname(sc->sc_dev), i));
   11879 
   11880 		status = CSR_READ(sc, WMREG_STATUS);
   11881 		DPRINTF(WM_DEBUG_LINK,
   11882 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11883 			device_xname(sc->sc_dev), status, STATUS_LU));
   11884 		if (status & STATUS_LU) {
   11885 			/* Link is up. */
   11886 			DPRINTF(WM_DEBUG_LINK,
   11887 			    ("%s: LINK: set media -> link up %s\n",
   11888 				device_xname(sc->sc_dev),
   11889 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11890 
   11891 			/*
   11892 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11893 			 * so we should update sc->sc_ctrl
   11894 			 */
   11895 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11896 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11897 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11898 			if (status & STATUS_FD)
   11899 				sc->sc_tctl |=
   11900 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11901 			else
   11902 				sc->sc_tctl |=
   11903 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11904 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11905 				sc->sc_fcrtl |= FCRTL_XONE;
   11906 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11907 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11908 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11909 			sc->sc_tbi_linkup = 1;
   11910 		} else {
   11911 			if (i == WM_LINKUP_TIMEOUT)
   11912 				wm_check_for_link(sc);
   11913 			/* Link is down. */
   11914 			DPRINTF(WM_DEBUG_LINK,
   11915 			    ("%s: LINK: set media -> link down\n",
   11916 				device_xname(sc->sc_dev)));
   11917 			sc->sc_tbi_linkup = 0;
   11918 		}
   11919 	} else {
   11920 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11921 			device_xname(sc->sc_dev)));
   11922 		sc->sc_tbi_linkup = 0;
   11923 	}
   11924 
   11925 	wm_tbi_serdes_set_linkled(sc);
   11926 
   11927 	return 0;
   11928 }
   11929 
   11930 /*
   11931  * wm_tbi_mediastatus:	[ifmedia interface function]
   11932  *
   11933  *	Get the current interface media status on a 1000BASE-X device.
   11934  */
   11935 static void
   11936 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11937 {
   11938 	struct wm_softc *sc = ifp->if_softc;
   11939 	uint32_t ctrl, status;
   11940 
   11941 	ifmr->ifm_status = IFM_AVALID;
   11942 	ifmr->ifm_active = IFM_ETHER;
   11943 
   11944 	status = CSR_READ(sc, WMREG_STATUS);
   11945 	if ((status & STATUS_LU) == 0) {
   11946 		ifmr->ifm_active |= IFM_NONE;
   11947 		return;
   11948 	}
   11949 
   11950 	ifmr->ifm_status |= IFM_ACTIVE;
   11951 	/* Only 82545 is LX */
   11952 	if (sc->sc_type == WM_T_82545)
   11953 		ifmr->ifm_active |= IFM_1000_LX;
   11954 	else
   11955 		ifmr->ifm_active |= IFM_1000_SX;
   11956 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11957 		ifmr->ifm_active |= IFM_FDX;
   11958 	else
   11959 		ifmr->ifm_active |= IFM_HDX;
   11960 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11961 	if (ctrl & CTRL_RFCE)
   11962 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11963 	if (ctrl & CTRL_TFCE)
   11964 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11965 }
   11966 
   11967 /* XXX TBI only */
   11968 static int
   11969 wm_check_for_link(struct wm_softc *sc)
   11970 {
   11971 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11972 	uint32_t rxcw;
   11973 	uint32_t ctrl;
   11974 	uint32_t status;
   11975 	bool signal;
   11976 
   11977 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11978 		device_xname(sc->sc_dev), __func__));
   11979 
   11980 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11981 		/* XXX need some work for >= 82571 */
   11982 		if (sc->sc_type >= WM_T_82571) {
   11983 			sc->sc_tbi_linkup = 1;
   11984 			return 0;
   11985 		}
   11986 	}
   11987 
   11988 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11989 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11990 	status = CSR_READ(sc, WMREG_STATUS);
   11991 	signal = wm_tbi_havesignal(sc, ctrl);
   11992 
   11993 	DPRINTF(WM_DEBUG_LINK,
   11994 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11995 		device_xname(sc->sc_dev), __func__, signal,
   11996 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11997 
   11998 	/*
   11999 	 * SWDPIN   LU RXCW
   12000 	 *	0    0	  0
   12001 	 *	0    0	  1	(should not happen)
   12002 	 *	0    1	  0	(should not happen)
   12003 	 *	0    1	  1	(should not happen)
   12004 	 *	1    0	  0	Disable autonego and force linkup
   12005 	 *	1    0	  1	got /C/ but not linkup yet
   12006 	 *	1    1	  0	(linkup)
   12007 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12008 	 *
   12009 	 */
   12010 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12011 		DPRINTF(WM_DEBUG_LINK,
   12012 		    ("%s: %s: force linkup and fullduplex\n",
   12013 			device_xname(sc->sc_dev), __func__));
   12014 		sc->sc_tbi_linkup = 0;
   12015 		/* Disable auto-negotiation in the TXCW register */
   12016 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12017 
   12018 		/*
   12019 		 * Force link-up and also force full-duplex.
   12020 		 *
    12021 		 * NOTE: The hardware updates CTRL's TFCE and RFCE
    12022 		 * automatically, so we should update sc->sc_ctrl.
   12023 		 */
   12024 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12025 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12026 	} else if (((status & STATUS_LU) != 0)
   12027 	    && ((rxcw & RXCW_C) != 0)
   12028 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12029 		sc->sc_tbi_linkup = 1;
   12030 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12031 			device_xname(sc->sc_dev),
   12032 			__func__));
   12033 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12034 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12035 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12036 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12037 			device_xname(sc->sc_dev), __func__));
   12038 	} else {
   12039 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12040 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12041 			status));
   12042 	}
   12043 
   12044 	return 0;
   12045 }
   12046 
   12047 /*
   12048  * wm_tbi_tick:
   12049  *
   12050  *	Check the link on TBI devices.
   12051  *	This function acts as mii_tick().
   12052  */
   12053 static void
   12054 wm_tbi_tick(struct wm_softc *sc)
   12055 {
   12056 	struct mii_data *mii = &sc->sc_mii;
   12057 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12058 	uint32_t status;
   12059 
   12060 	KASSERT(WM_CORE_LOCKED(sc));
   12061 
   12062 	status = CSR_READ(sc, WMREG_STATUS);
   12063 
   12064 	/* XXX is this needed? */
   12065 	(void)CSR_READ(sc, WMREG_RXCW);
   12066 	(void)CSR_READ(sc, WMREG_CTRL);
   12067 
   12068 	/* set link status */
   12069 	if ((status & STATUS_LU) == 0) {
   12070 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12071 			device_xname(sc->sc_dev)));
   12072 		sc->sc_tbi_linkup = 0;
   12073 	} else if (sc->sc_tbi_linkup == 0) {
   12074 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12075 			device_xname(sc->sc_dev),
   12076 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12077 		sc->sc_tbi_linkup = 1;
   12078 		sc->sc_tbi_serdes_ticks = 0;
   12079 	}
   12080 
   12081 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12082 		goto setled;
   12083 
   12084 	if ((status & STATUS_LU) == 0) {
   12085 		sc->sc_tbi_linkup = 0;
   12086 		/* If the timer expired, retry autonegotiation */
   12087 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12088 		    && (++sc->sc_tbi_serdes_ticks
   12089 			>= sc->sc_tbi_serdes_anegticks)) {
   12090 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12091 				device_xname(sc->sc_dev), __func__));
   12092 			sc->sc_tbi_serdes_ticks = 0;
   12093 			/*
   12094 			 * Reset the link, and let autonegotiation do
   12095 			 * its thing
   12096 			 */
   12097 			sc->sc_ctrl |= CTRL_LRST;
   12098 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12099 			CSR_WRITE_FLUSH(sc);
   12100 			delay(1000);
   12101 			sc->sc_ctrl &= ~CTRL_LRST;
   12102 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12103 			CSR_WRITE_FLUSH(sc);
   12104 			delay(1000);
   12105 			CSR_WRITE(sc, WMREG_TXCW,
   12106 			    sc->sc_txcw & ~TXCW_ANE);
   12107 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12108 		}
   12109 	}
   12110 
   12111 setled:
   12112 	wm_tbi_serdes_set_linkled(sc);
   12113 }
   12114 
   12115 /* SERDES related */
   12116 static void
   12117 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12118 {
   12119 	uint32_t reg;
   12120 
   12121 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12122 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12123 		return;
   12124 
   12125 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12126 	reg |= PCS_CFG_PCS_EN;
   12127 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12128 
   12129 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12130 	reg &= ~CTRL_EXT_SWDPIN(3);
   12131 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12132 	CSR_WRITE_FLUSH(sc);
   12133 }
   12134 
   12135 static int
   12136 wm_serdes_mediachange(struct ifnet *ifp)
   12137 {
   12138 	struct wm_softc *sc = ifp->if_softc;
   12139 	bool pcs_autoneg = true; /* XXX */
   12140 	uint32_t ctrl_ext, pcs_lctl, reg;
   12141 
   12142 	/* XXX Currently, this function is not called on 8257[12] */
   12143 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12144 	    || (sc->sc_type >= WM_T_82575))
   12145 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12146 
   12147 	wm_serdes_power_up_link_82575(sc);
   12148 
   12149 	sc->sc_ctrl |= CTRL_SLU;
   12150 
   12151 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12152 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12153 
   12154 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12155 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12156 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12157 	case CTRL_EXT_LINK_MODE_SGMII:
   12158 		pcs_autoneg = true;
   12159 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12160 		break;
   12161 	case CTRL_EXT_LINK_MODE_1000KX:
   12162 		pcs_autoneg = false;
   12163 		/* FALLTHROUGH */
   12164 	default:
   12165 		if ((sc->sc_type == WM_T_82575)
   12166 		    || (sc->sc_type == WM_T_82576)) {
   12167 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12168 				pcs_autoneg = false;
   12169 		}
   12170 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12171 		    | CTRL_FRCFDX;
   12172 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12173 	}
   12174 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12175 
   12176 	if (pcs_autoneg) {
   12177 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12178 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12179 
   12180 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12181 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12182 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12183 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12184 	} else
   12185 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12186 
   12187 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    12188 
   12190 	return 0;
   12191 }
   12192 
   12193 static void
   12194 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12195 {
   12196 	struct wm_softc *sc = ifp->if_softc;
   12197 	struct mii_data *mii = &sc->sc_mii;
   12198 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12199 	uint32_t pcs_adv, pcs_lpab, reg;
   12200 
   12201 	ifmr->ifm_status = IFM_AVALID;
   12202 	ifmr->ifm_active = IFM_ETHER;
   12203 
   12204 	/* Check PCS */
   12205 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12206 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12207 		ifmr->ifm_active |= IFM_NONE;
   12208 		sc->sc_tbi_linkup = 0;
   12209 		goto setled;
   12210 	}
   12211 
   12212 	sc->sc_tbi_linkup = 1;
   12213 	ifmr->ifm_status |= IFM_ACTIVE;
   12214 	if (sc->sc_type == WM_T_I354) {
   12215 		uint32_t status;
   12216 
   12217 		status = CSR_READ(sc, WMREG_STATUS);
   12218 		if (((status & STATUS_2P5_SKU) != 0)
   12219 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12220 			ifmr->ifm_active |= IFM_2500_KX;
   12221 		} else
   12222 			ifmr->ifm_active |= IFM_1000_KX;
   12223 	} else {
   12224 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12225 		case PCS_LSTS_SPEED_10:
   12226 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12227 			break;
   12228 		case PCS_LSTS_SPEED_100:
   12229 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12230 			break;
   12231 		case PCS_LSTS_SPEED_1000:
   12232 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12233 			break;
   12234 		default:
   12235 			device_printf(sc->sc_dev, "Unknown speed\n");
   12236 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12237 			break;
   12238 		}
   12239 	}
   12240 	if ((reg & PCS_LSTS_FDX) != 0)
   12241 		ifmr->ifm_active |= IFM_FDX;
   12242 	else
   12243 		ifmr->ifm_active |= IFM_HDX;
   12244 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12245 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12246 		/* Check flow */
   12247 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12248 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12249 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12250 			goto setled;
   12251 		}
   12252 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12253 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12254 		DPRINTF(WM_DEBUG_LINK,
   12255 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12256 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12257 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12258 			mii->mii_media_active |= IFM_FLOW
   12259 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12260 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12261 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12262 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12263 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12264 			mii->mii_media_active |= IFM_FLOW
   12265 			    | IFM_ETH_TXPAUSE;
   12266 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12267 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12268 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12269 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12270 			mii->mii_media_active |= IFM_FLOW
   12271 			    | IFM_ETH_RXPAUSE;
   12272 		}
   12273 	}
   12274 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12275 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12276 setled:
   12277 	wm_tbi_serdes_set_linkled(sc);
   12278 }
   12279 
   12280 /*
   12281  * wm_serdes_tick:
   12282  *
   12283  *	Check the link on serdes devices.
   12284  */
   12285 static void
   12286 wm_serdes_tick(struct wm_softc *sc)
   12287 {
   12288 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12289 	struct mii_data *mii = &sc->sc_mii;
   12290 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12291 	uint32_t reg;
   12292 
   12293 	KASSERT(WM_CORE_LOCKED(sc));
   12294 
   12295 	mii->mii_media_status = IFM_AVALID;
   12296 	mii->mii_media_active = IFM_ETHER;
   12297 
   12298 	/* Check PCS */
   12299 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12300 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12301 		mii->mii_media_status |= IFM_ACTIVE;
   12302 		sc->sc_tbi_linkup = 1;
   12303 		sc->sc_tbi_serdes_ticks = 0;
   12304 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12305 		if ((reg & PCS_LSTS_FDX) != 0)
   12306 			mii->mii_media_active |= IFM_FDX;
   12307 		else
   12308 			mii->mii_media_active |= IFM_HDX;
   12309 	} else {
   12310 		mii->mii_media_status |= IFM_NONE;
   12311 		sc->sc_tbi_linkup = 0;
   12312 		/* If the timer expired, retry autonegotiation */
   12313 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12314 		    && (++sc->sc_tbi_serdes_ticks
   12315 			>= sc->sc_tbi_serdes_anegticks)) {
   12316 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12317 				device_xname(sc->sc_dev), __func__));
   12318 			sc->sc_tbi_serdes_ticks = 0;
   12319 			/* XXX */
   12320 			wm_serdes_mediachange(ifp);
   12321 		}
   12322 	}
   12323 
   12324 	wm_tbi_serdes_set_linkled(sc);
   12325 }
   12326 
   12327 /* SFP related */
   12328 
   12329 static int
   12330 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12331 {
   12332 	uint32_t i2ccmd;
   12333 	int i;
   12334 
   12335 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12336 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12337 
   12338 	/* Poll the ready bit */
   12339 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12340 		delay(50);
   12341 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12342 		if (i2ccmd & I2CCMD_READY)
   12343 			break;
   12344 	}
   12345 	if ((i2ccmd & I2CCMD_READY) == 0)
   12346 		return -1;
   12347 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12348 		return -1;
   12349 
   12350 	*data = i2ccmd & 0x00ff;
   12351 
   12352 	return 0;
   12353 }
   12354 
   12355 static uint32_t
   12356 wm_sfp_get_media_type(struct wm_softc *sc)
   12357 {
   12358 	uint32_t ctrl_ext;
   12359 	uint8_t val = 0;
   12360 	int timeout = 3;
   12361 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12362 	int rv = -1;
   12363 
   12364 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12365 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12366 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12367 	CSR_WRITE_FLUSH(sc);
   12368 
   12369 	/* Read SFP module data */
   12370 	while (timeout) {
   12371 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12372 		if (rv == 0)
   12373 			break;
   12374 		delay(100*1000); /* XXX too big */
   12375 		timeout--;
   12376 	}
   12377 	if (rv != 0)
   12378 		goto out;
   12379 
   12380 	switch (val) {
   12381 	case SFF_SFP_ID_SFF:
   12382 		aprint_normal_dev(sc->sc_dev,
   12383 		    "Module/Connector soldered to board\n");
   12384 		break;
   12385 	case SFF_SFP_ID_SFP:
   12386 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12387 		break;
   12388 	case SFF_SFP_ID_UNKNOWN:
   12389 		goto out;
   12390 	default:
   12391 		break;
   12392 	}
   12393 
   12394 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12395 	if (rv != 0)
   12396 		goto out;
   12397 
   12398 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12399 		mediatype = WM_MEDIATYPE_SERDES;
   12400 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12401 		sc->sc_flags |= WM_F_SGMII;
   12402 		mediatype = WM_MEDIATYPE_COPPER;
   12403 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12404 		sc->sc_flags |= WM_F_SGMII;
   12405 		mediatype = WM_MEDIATYPE_SERDES;
   12406 	}
   12407 
   12408 out:
   12409 	/* Restore I2C interface setting */
   12410 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12411 
   12412 	return mediatype;
   12413 }
   12414 
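          /*
           * In short, the mapping above: 1000BASE-SX/LX modules attach as
           * WM_MEDIATYPE_SERDES, 1000BASE-T modules as WM_MEDIATYPE_COPPER
           * with WM_F_SGMII set, 100BASE-FX modules as WM_MEDIATYPE_SERDES
           * over SGMII, and anything else stays WM_MEDIATYPE_UNKNOWN.
           */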
   12415 /*
   12416  * NVM related.
    12417  * Microwire, SPI (with or without EERD) and Flash.
   12418  */
   12419 
    12420 /* Common to both SPI and Microwire */
   12421 
   12422 /*
   12423  * wm_eeprom_sendbits:
   12424  *
   12425  *	Send a series of bits to the EEPROM.
   12426  */
   12427 static void
   12428 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12429 {
   12430 	uint32_t reg;
   12431 	int x;
   12432 
   12433 	reg = CSR_READ(sc, WMREG_EECD);
   12434 
   12435 	for (x = nbits; x > 0; x--) {
   12436 		if (bits & (1U << (x - 1)))
   12437 			reg |= EECD_DI;
   12438 		else
   12439 			reg &= ~EECD_DI;
   12440 		CSR_WRITE(sc, WMREG_EECD, reg);
   12441 		CSR_WRITE_FLUSH(sc);
   12442 		delay(2);
   12443 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12444 		CSR_WRITE_FLUSH(sc);
   12445 		delay(2);
   12446 		CSR_WRITE(sc, WMREG_EECD, reg);
   12447 		CSR_WRITE_FLUSH(sc);
   12448 		delay(2);
   12449 	}
   12450 }
   12451 
   12452 /*
   12453  * wm_eeprom_recvbits:
   12454  *
   12455  *	Receive a series of bits from the EEPROM.
   12456  */
   12457 static void
   12458 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12459 {
   12460 	uint32_t reg, val;
   12461 	int x;
   12462 
   12463 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12464 
   12465 	val = 0;
   12466 	for (x = nbits; x > 0; x--) {
   12467 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12468 		CSR_WRITE_FLUSH(sc);
   12469 		delay(2);
   12470 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12471 			val |= (1U << (x - 1));
   12472 		CSR_WRITE(sc, WMREG_EECD, reg);
   12473 		CSR_WRITE_FLUSH(sc);
   12474 		delay(2);
   12475 	}
   12476 	*valp = val;
   12477 }
   12478 
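          /*
           * Together these two helpers bit-bang whole EEPROM transactions.
           * As a sketch (a 93C46-style part with 6 address bits is assumed
           * for illustration), a Microwire READ of word 3 is:
           *
           *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
           *	wm_eeprom_sendbits(sc, 3, 6);
           *	wm_eeprom_recvbits(sc, &val, 16);
           *
           * with CHIP SELECT asserted around the sequence, exactly as
           * wm_nvm_read_uwire() does below.
           */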
   12479 /* Microwire */
   12480 
   12481 /*
   12482  * wm_nvm_read_uwire:
   12483  *
   12484  *	Read a word from the EEPROM using the MicroWire protocol.
   12485  */
   12486 static int
   12487 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12488 {
   12489 	uint32_t reg, val;
   12490 	int i;
   12491 
   12492 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12493 		device_xname(sc->sc_dev), __func__));
   12494 
   12495 	if (sc->nvm.acquire(sc) != 0)
   12496 		return -1;
   12497 
   12498 	for (i = 0; i < wordcnt; i++) {
   12499 		/* Clear SK and DI. */
   12500 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12501 		CSR_WRITE(sc, WMREG_EECD, reg);
   12502 
   12503 		/*
   12504 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12505 		 * and Xen.
   12506 		 *
   12507 		 * We use this workaround only for 82540 because qemu's
   12508 		 * e1000 act as 82540.
   12509 		 */
   12510 		if (sc->sc_type == WM_T_82540) {
   12511 			reg |= EECD_SK;
   12512 			CSR_WRITE(sc, WMREG_EECD, reg);
   12513 			reg &= ~EECD_SK;
   12514 			CSR_WRITE(sc, WMREG_EECD, reg);
   12515 			CSR_WRITE_FLUSH(sc);
   12516 			delay(2);
   12517 		}
   12518 		/* XXX: end of workaround */
   12519 
   12520 		/* Set CHIP SELECT. */
   12521 		reg |= EECD_CS;
   12522 		CSR_WRITE(sc, WMREG_EECD, reg);
   12523 		CSR_WRITE_FLUSH(sc);
   12524 		delay(2);
   12525 
   12526 		/* Shift in the READ command. */
   12527 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12528 
   12529 		/* Shift in address. */
   12530 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12531 
   12532 		/* Shift out the data. */
   12533 		wm_eeprom_recvbits(sc, &val, 16);
   12534 		data[i] = val & 0xffff;
   12535 
   12536 		/* Clear CHIP SELECT. */
   12537 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12538 		CSR_WRITE(sc, WMREG_EECD, reg);
   12539 		CSR_WRITE_FLUSH(sc);
   12540 		delay(2);
   12541 	}
   12542 
   12543 	sc->nvm.release(sc);
   12544 	return 0;
   12545 }
   12546 
   12547 /* SPI */
   12548 
   12549 /*
   12550  * Set SPI and FLASH related information from the EECD register.
   12551  * For 82541 and 82547, the word size is taken from EEPROM.
   12552  */
   12553 static int
   12554 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12555 {
   12556 	int size;
   12557 	uint32_t reg;
   12558 	uint16_t data;
   12559 
   12560 	reg = CSR_READ(sc, WMREG_EECD);
   12561 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12562 
   12563 	/* Read the size of NVM from EECD by default */
   12564 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12565 	switch (sc->sc_type) {
   12566 	case WM_T_82541:
   12567 	case WM_T_82541_2:
   12568 	case WM_T_82547:
   12569 	case WM_T_82547_2:
   12570 		/* Set dummy value to access EEPROM */
   12571 		sc->sc_nvm_wordsize = 64;
   12572 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12573 			aprint_error_dev(sc->sc_dev,
   12574 			    "%s: failed to read EEPROM size\n", __func__);
   12575 		}
   12576 		reg = data;
   12577 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12578 		if (size == 0)
   12579 			size = 6; /* 64 word size */
   12580 		else
   12581 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12582 		break;
   12583 	case WM_T_80003:
   12584 	case WM_T_82571:
   12585 	case WM_T_82572:
   12586 	case WM_T_82573: /* SPI case */
   12587 	case WM_T_82574: /* SPI case */
   12588 	case WM_T_82583: /* SPI case */
   12589 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12590 		if (size > 14)
   12591 			size = 14;
   12592 		break;
   12593 	case WM_T_82575:
   12594 	case WM_T_82576:
   12595 	case WM_T_82580:
   12596 	case WM_T_I350:
   12597 	case WM_T_I354:
   12598 	case WM_T_I210:
   12599 	case WM_T_I211:
   12600 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12601 		if (size > 15)
   12602 			size = 15;
   12603 		break;
   12604 	default:
   12605 		aprint_error_dev(sc->sc_dev,
   12606 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12607 		return -1;
   12609 	}
   12610 
   12611 	sc->sc_nvm_wordsize = 1 << size;
   12612 
   12613 	return 0;
   12614 }
   12615 
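          /*
           * Worked example of the sizing above: an EECD size field of 2 on
           * an 82571 gives size = 2 + NVM_WORD_SIZE_BASE_SHIFT.  With the
           * base shift of 6 implied by the 82541 fallback above (size 6 ==
           * 64 words), that is 1 << 8 = 256 16-bit words in
           * sc_nvm_wordsize.
           */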
   12616 /*
   12617  * wm_nvm_ready_spi:
   12618  *
   12619  *	Wait for a SPI EEPROM to be ready for commands.
   12620  */
   12621 static int
   12622 wm_nvm_ready_spi(struct wm_softc *sc)
   12623 {
   12624 	uint32_t val;
   12625 	int usec;
   12626 
   12627 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12628 		device_xname(sc->sc_dev), __func__));
   12629 
   12630 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12631 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12632 		wm_eeprom_recvbits(sc, &val, 8);
   12633 		if ((val & SPI_SR_RDY) == 0)
   12634 			break;
   12635 	}
   12636 	if (usec >= SPI_MAX_RETRIES) {
    12637 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12638 		return -1;
   12639 	}
   12640 	return 0;
   12641 }
   12642 
   12643 /*
   12644  * wm_nvm_read_spi:
   12645  *
    12646  *	Read a word from the EEPROM using the SPI protocol.
   12647  */
   12648 static int
   12649 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12650 {
   12651 	uint32_t reg, val;
   12652 	int i;
   12653 	uint8_t opc;
   12654 	int rv = 0;
   12655 
   12656 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12657 		device_xname(sc->sc_dev), __func__));
   12658 
   12659 	if (sc->nvm.acquire(sc) != 0)
   12660 		return -1;
   12661 
   12662 	/* Clear SK and CS. */
   12663 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12664 	CSR_WRITE(sc, WMREG_EECD, reg);
   12665 	CSR_WRITE_FLUSH(sc);
   12666 	delay(2);
   12667 
   12668 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12669 		goto out;
   12670 
   12671 	/* Toggle CS to flush commands. */
   12672 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12673 	CSR_WRITE_FLUSH(sc);
   12674 	delay(2);
   12675 	CSR_WRITE(sc, WMREG_EECD, reg);
   12676 	CSR_WRITE_FLUSH(sc);
   12677 	delay(2);
   12678 
   12679 	opc = SPI_OPC_READ;
   12680 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12681 		opc |= SPI_OPC_A8;
   12682 
   12683 	wm_eeprom_sendbits(sc, opc, 8);
   12684 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12685 
   12686 	for (i = 0; i < wordcnt; i++) {
   12687 		wm_eeprom_recvbits(sc, &val, 16);
   12688 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12689 	}
   12690 
   12691 	/* Raise CS and clear SK. */
   12692 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12693 	CSR_WRITE(sc, WMREG_EECD, reg);
   12694 	CSR_WRITE_FLUSH(sc);
   12695 	delay(2);
   12696 
   12697 out:
   12698 	sc->nvm.release(sc);
   12699 	return rv;
   12700 }
   12701 
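          /*
           * Note on SPI_OPC_A8 above: parts with 8-bit addressing carry
           * the ninth address bit in the opcode.  Example: reading word
           * 130 means byte address 260 (0x104), so bit 8 is set; the code
           * sends SPI_OPC_READ | SPI_OPC_A8, then the low eight address
           * bits (0x04).
           */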
    12702 /* Reading via the EERD register */
   12703 
   12704 static int
   12705 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12706 {
   12707 	uint32_t attempts = 100000;
   12708 	uint32_t i, reg = 0;
   12709 	int32_t done = -1;
   12710 
   12711 	for (i = 0; i < attempts; i++) {
   12712 		reg = CSR_READ(sc, rw);
   12713 
   12714 		if (reg & EERD_DONE) {
   12715 			done = 0;
   12716 			break;
   12717 		}
   12718 		delay(5);
   12719 	}
   12720 
   12721 	return done;
   12722 }
   12723 
   12724 static int
   12725 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12726 {
   12727 	int i, eerd = 0;
   12728 	int rv = 0;
   12729 
   12730 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12731 		device_xname(sc->sc_dev), __func__));
   12732 
   12733 	if (sc->nvm.acquire(sc) != 0)
   12734 		return -1;
   12735 
   12736 	for (i = 0; i < wordcnt; i++) {
   12737 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12738 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12739 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12740 		if (rv != 0) {
   12741 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12742 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12743 			break;
   12744 		}
   12745 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12746 	}
   12747 
   12748 	sc->nvm.release(sc);
   12749 	return rv;
   12750 }
   12751 
   12752 /* Flash */
   12753 
   12754 static int
   12755 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12756 {
   12757 	uint32_t eecd;
   12758 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12759 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12760 	uint32_t nvm_dword = 0;
   12761 	uint8_t sig_byte = 0;
   12762 	int rv;
   12763 
   12764 	switch (sc->sc_type) {
   12765 	case WM_T_PCH_SPT:
   12766 	case WM_T_PCH_CNP:
   12767 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12768 		act_offset = ICH_NVM_SIG_WORD * 2;
   12769 
   12770 		/* Set bank to 0 in case flash read fails. */
   12771 		*bank = 0;
   12772 
   12773 		/* Check bank 0 */
   12774 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12775 		if (rv != 0)
   12776 			return rv;
   12777 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12778 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12779 			*bank = 0;
   12780 			return 0;
   12781 		}
   12782 
   12783 		/* Check bank 1 */
   12784 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   12785 		    &nvm_dword);
   12786 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12787 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12788 			*bank = 1;
   12789 			return 0;
   12790 		}
   12791 		aprint_error_dev(sc->sc_dev,
   12792 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12793 		return -1;
   12794 	case WM_T_ICH8:
   12795 	case WM_T_ICH9:
   12796 		eecd = CSR_READ(sc, WMREG_EECD);
   12797 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12798 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12799 			return 0;
   12800 		}
   12801 		/* FALLTHROUGH */
   12802 	default:
   12803 		/* Default to 0 */
   12804 		*bank = 0;
   12805 
   12806 		/* Check bank 0 */
   12807 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12808 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12809 			*bank = 0;
   12810 			return 0;
   12811 		}
   12812 
   12813 		/* Check bank 1 */
   12814 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12815 		    &sig_byte);
   12816 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12817 			*bank = 1;
   12818 			return 0;
   12819 		}
   12820 	}
   12821 
   12822 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12823 		device_xname(sc->sc_dev)));
   12824 	return -1;
   12825 }
   12826 
   12827 /******************************************************************************
   12828  * This function does initial flash setup so that a new read/write/erase cycle
   12829  * can be started.
   12830  *
   12831  * sc - The pointer to the hw structure
   12832  ****************************************************************************/
   12833 static int32_t
   12834 wm_ich8_cycle_init(struct wm_softc *sc)
   12835 {
   12836 	uint16_t hsfsts;
   12837 	int32_t error = 1;
   12838 	int32_t i     = 0;
   12839 
   12840 	if (sc->sc_type >= WM_T_PCH_SPT)
   12841 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12842 	else
   12843 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12844 
    12845 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   12846 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12847 		return error;
   12848 
    12849 	/* Clear FCERR and DAEL in HW status by writing 1s */
   12851 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12852 
   12853 	if (sc->sc_type >= WM_T_PCH_SPT)
   12854 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12855 	else
   12856 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12857 
   12858 	/*
    12859 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12860 	 * check against before starting a new cycle, or the FDONE bit
    12861 	 * should be changed in the hardware so that it is 1 after a
    12862 	 * hardware reset, which could then be used to tell whether a cycle
    12863 	 * is in progress or has completed.  We should also have some
    12864 	 * software semaphore mechanism to guard FDONE or the
    12865 	 * cycle-in-progress bit so that accesses to those bits by two
    12866 	 * threads are serialized, or some way to keep two threads from
    12867 	 * starting a cycle at the same time.
   12867 	 */
   12868 
   12869 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12870 		/*
   12871 		 * There is no cycle running at present, so we can start a
   12872 		 * cycle
   12873 		 */
   12874 
   12875 		/* Begin by setting Flash Cycle Done. */
   12876 		hsfsts |= HSFSTS_DONE;
   12877 		if (sc->sc_type >= WM_T_PCH_SPT)
   12878 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12879 			    hsfsts & 0xffffUL);
   12880 		else
   12881 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12882 		error = 0;
   12883 	} else {
   12884 		/*
   12885 		 * Otherwise poll for sometime so the current cycle has a
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
   12888 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12889 			if (sc->sc_type >= WM_T_PCH_SPT)
   12890 				hsfsts = ICH8_FLASH_READ32(sc,
   12891 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12892 			else
   12893 				hsfsts = ICH8_FLASH_READ16(sc,
   12894 				    ICH_FLASH_HSFSTS);
   12895 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12896 				error = 0;
   12897 				break;
   12898 			}
   12899 			delay(1);
   12900 		}
   12901 		if (error == 0) {
   12902 			/*
			 * The previous cycle ended before the timeout;
			 * now set the Flash Cycle Done.
   12905 			 */
   12906 			hsfsts |= HSFSTS_DONE;
   12907 			if (sc->sc_type >= WM_T_PCH_SPT)
   12908 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12909 				    hsfsts & 0xffffUL);
   12910 			else
   12911 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12912 				    hsfsts);
   12913 		}
   12914 	}
   12915 	return error;
   12916 }
   12917 
   12918 /******************************************************************************
   12919  * This function starts a flash cycle and waits for its completion
   12920  *
   12921  * sc - The pointer to the hw structure
   12922  ****************************************************************************/
   12923 static int32_t
   12924 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12925 {
   12926 	uint16_t hsflctl;
   12927 	uint16_t hsfsts;
   12928 	int32_t error = 1;
   12929 	uint32_t i = 0;
   12930 
   12931 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12932 	if (sc->sc_type >= WM_T_PCH_SPT)
   12933 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12934 	else
   12935 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12936 	hsflctl |= HSFCTL_GO;
   12937 	if (sc->sc_type >= WM_T_PCH_SPT)
   12938 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12939 		    (uint32_t)hsflctl << 16);
   12940 	else
   12941 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12942 
   12943 	/* Wait till FDONE bit is set to 1 */
   12944 	do {
   12945 		if (sc->sc_type >= WM_T_PCH_SPT)
   12946 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12947 			    & 0xffffUL;
   12948 		else
   12949 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12950 		if (hsfsts & HSFSTS_DONE)
   12951 			break;
   12952 		delay(1);
   12953 		i++;
   12954 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12956 		error = 0;
   12957 
   12958 	return error;
   12959 }
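
/*
 * Note on the sc_type >= WM_T_PCH_SPT branches above (illustrative): on
 * SPT and newer the two 16-bit flash registers share one 32-bit LAN
 * memory word at ICH_FLASH_HSFSTS, HSFSTS in the low half and HSFCTL in
 * the high half.  A 32-bit read of 0x00010000 therefore means
 * HSFCTL = 0x0001 and HSFSTS = 0x0000, which is why the code above
 * shifts by 16 and masks with 0xffff.
 */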
   12960 
   12961 /******************************************************************************
   12962  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12963  *
   12964  * sc - The pointer to the hw structure
   12965  * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   12967  * data - Pointer to the word to store the value read.
   12968  *****************************************************************************/
   12969 static int32_t
   12970 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12971     uint32_t size, uint32_t *data)
   12972 {
   12973 	uint16_t hsfsts;
   12974 	uint16_t hsflctl;
   12975 	uint32_t flash_linear_address;
   12976 	uint32_t flash_data = 0;
   12977 	int32_t error = 1;
   12978 	int32_t count = 0;
   12979 
	if (size < 1 || size > 4 || data == NULL ||
   12981 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12982 		return error;
   12983 
   12984 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12985 	    sc->sc_ich8_flash_base;
   12986 
   12987 	do {
   12988 		delay(1);
   12989 		/* Steps */
   12990 		error = wm_ich8_cycle_init(sc);
   12991 		if (error)
   12992 			break;
   12993 
   12994 		if (sc->sc_type >= WM_T_PCH_SPT)
   12995 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12996 			    >> 16;
   12997 		else
   12998 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field encodes size - 1 (0=1, 1=2, 3=4 bytes) */
   13000 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13001 		    & HSFCTL_BCOUNT_MASK;
   13002 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13003 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13004 			/*
			 * In SPT, this register is in LAN memory space, not
   13006 			 * flash. Therefore, only 32 bit access is supported.
   13007 			 */
   13008 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13009 			    (uint32_t)hsflctl << 16);
   13010 		} else
   13011 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13012 
   13013 		/*
   13014 		 * Write the last 24 bits of index into Flash Linear address
   13015 		 * field in Flash Address
   13016 		 */
		/* TODO: maybe check the index against the size of the flash */
   13018 
   13019 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13020 
   13021 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13022 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read in the Flash Data0
		 * register, least significant byte first.
		 */
   13029 		if (error == 0) {
   13030 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13031 			if (size == 1)
   13032 				*data = (uint8_t)(flash_data & 0x000000FF);
   13033 			else if (size == 2)
   13034 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13035 			else if (size == 4)
   13036 				*data = (uint32_t)flash_data;
   13037 			break;
   13038 		} else {
   13039 			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to retry up to
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13044 			 */
   13045 			if (sc->sc_type >= WM_T_PCH_SPT)
   13046 				hsfsts = ICH8_FLASH_READ32(sc,
   13047 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13048 			else
   13049 				hsfsts = ICH8_FLASH_READ16(sc,
   13050 				    ICH_FLASH_HSFSTS);
   13051 
   13052 			if (hsfsts & HSFSTS_ERR) {
   13053 				/* Repeat for some time before giving up. */
   13054 				continue;
   13055 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13056 				break;
   13057 		}
   13058 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13059 
   13060 	return error;
   13061 }
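
/*
 * Usage sketch (an illustration, not code from this driver): reading the
 * 16-bit word at byte offset 0x10 of the active flash region would be
 *
 *	uint32_t val;
 *
 *	if (wm_read_ich8_data(sc, 0x10, 2, &val) == 0)
 *		... the word is in (uint16_t)val ...
 *
 * which is what the wm_read_ich8_{byte,word,dword}() wrappers below do.
 */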
   13062 
   13063 /******************************************************************************
   13064  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13065  *
   13066  * sc - pointer to wm_hw structure
   13067  * index - The index of the byte to read.
   13068  * data - Pointer to a byte to store the value read.
   13069  *****************************************************************************/
   13070 static int32_t
   13071 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13072 {
   13073 	int32_t status;
   13074 	uint32_t word = 0;
   13075 
   13076 	status = wm_read_ich8_data(sc, index, 1, &word);
   13077 	if (status == 0)
   13078 		*data = (uint8_t)word;
   13079 	else
   13080 		*data = 0;
   13081 
   13082 	return status;
   13083 }
   13084 
   13085 /******************************************************************************
   13086  * Reads a word from the NVM using the ICH8 flash access registers.
   13087  *
   13088  * sc - pointer to wm_hw structure
   13089  * index - The starting byte index of the word to read.
   13090  * data - Pointer to a word to store the value read.
   13091  *****************************************************************************/
   13092 static int32_t
   13093 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13094 {
   13095 	int32_t status;
   13096 	uint32_t word = 0;
   13097 
   13098 	status = wm_read_ich8_data(sc, index, 2, &word);
   13099 	if (status == 0)
   13100 		*data = (uint16_t)word;
   13101 	else
   13102 		*data = 0;
   13103 
   13104 	return status;
   13105 }
   13106 
   13107 /******************************************************************************
   13108  * Reads a dword from the NVM using the ICH8 flash access registers.
   13109  *
   13110  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   13113  *****************************************************************************/
   13114 static int32_t
   13115 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13116 {
   13117 	int32_t status;
   13118 
   13119 	status = wm_read_ich8_data(sc, index, 4, data);
   13120 	return status;
   13121 }
   13122 
   13123 /******************************************************************************
   13124  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13125  * register.
   13126  *
   13127  * sc - Struct containing variables accessed by shared code
   13128  * offset - offset of word in the EEPROM to read
   13129  * data - word read from the EEPROM
   13130  * words - number of words to read
   13131  *****************************************************************************/
   13132 static int
   13133 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13134 {
   13135 	int32_t	 rv = 0;
   13136 	uint32_t flash_bank = 0;
   13137 	uint32_t act_offset = 0;
   13138 	uint32_t bank_offset = 0;
   13139 	uint16_t word = 0;
   13140 	uint16_t i = 0;
   13141 
   13142 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13143 		device_xname(sc->sc_dev), __func__));
   13144 
   13145 	if (sc->nvm.acquire(sc) != 0)
   13146 		return -1;
   13147 
   13148 	/*
   13149 	 * We need to know which is the valid flash bank.  In the event
   13150 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13151 	 * managing flash_bank. So it cannot be trusted and needs
   13152 	 * to be updated with each read.
   13153 	 */
   13154 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13155 	if (rv) {
   13156 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13157 			device_xname(sc->sc_dev)));
   13158 		flash_bank = 0;
   13159 	}
   13160 
   13161 	/*
	 * Adjust the offset if we're on bank 1; the bank size is counted
	 * in words, hence the * 2 to get a byte offset.
   13164 	 */
   13165 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13166 
   13167 	for (i = 0; i < words; i++) {
   13168 		/* The NVM part needs a byte offset, hence * 2 */
   13169 		act_offset = bank_offset + ((offset + i) * 2);
   13170 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13171 		if (rv) {
   13172 			aprint_error_dev(sc->sc_dev,
   13173 			    "%s: failed to read NVM\n", __func__);
   13174 			break;
   13175 		}
   13176 		data[i] = word;
   13177 	}
   13178 
   13179 	sc->nvm.release(sc);
   13180 	return rv;
   13181 }
   13182 
   13183 /******************************************************************************
   13184  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13185  * register.
   13186  *
   13187  * sc - Struct containing variables accessed by shared code
   13188  * offset - offset of word in the EEPROM to read
   13189  * data - word read from the EEPROM
   13190  * words - number of words to read
   13191  *****************************************************************************/
   13192 static int
   13193 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13194 {
   13195 	int32_t	 rv = 0;
   13196 	uint32_t flash_bank = 0;
   13197 	uint32_t act_offset = 0;
   13198 	uint32_t bank_offset = 0;
   13199 	uint32_t dword = 0;
   13200 	uint16_t i = 0;
   13201 
   13202 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13203 		device_xname(sc->sc_dev), __func__));
   13204 
   13205 	if (sc->nvm.acquire(sc) != 0)
   13206 		return -1;
   13207 
   13208 	/*
   13209 	 * We need to know which is the valid flash bank.  In the event
   13210 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13211 	 * managing flash_bank. So it cannot be trusted and needs
   13212 	 * to be updated with each read.
   13213 	 */
   13214 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13215 	if (rv) {
   13216 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13217 			device_xname(sc->sc_dev)));
   13218 		flash_bank = 0;
   13219 	}
   13220 
   13221 	/*
	 * Adjust the offset if we're on bank 1; the bank size is counted
	 * in words, hence the * 2 to get a byte offset.
   13224 	 */
   13225 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13226 
   13227 	for (i = 0; i < words; i++) {
   13228 		/* The NVM part needs a byte offset, hence * 2 */
   13229 		act_offset = bank_offset + ((offset + i) * 2);
   13230 		/* but we must read dword aligned, so mask ... */
   13231 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13232 		if (rv) {
   13233 			aprint_error_dev(sc->sc_dev,
   13234 			    "%s: failed to read NVM\n", __func__);
   13235 			break;
   13236 		}
   13237 		/* ... and pick out low or high word */
   13238 		if ((act_offset & 0x2) == 0)
   13239 			data[i] = (uint16_t)(dword & 0xFFFF);
   13240 		else
   13241 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13242 	}
   13243 
   13244 	sc->nvm.release(sc);
   13245 	return rv;
   13246 }
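
/*
 * Worked example for the arithmetic above (illustrative values): with
 * flash_bank = 1 and sc_ich8_flash_bank_size = 0x1000 words, bank_offset
 * is 0x2000 bytes.  Reading word offset 3 gives act_offset = 0x2006; the
 * dword is fetched from 0x2004 and, because bit 1 of act_offset is set,
 * the upper 16 bits of that dword are returned.
 */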
   13247 
   13248 /* iNVM */
   13249 
   13250 static int
   13251 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13252 {
	int32_t	 rv = -1;	/* Assume "not found" until a match is seen */
   13254 	uint32_t invm_dword;
   13255 	uint16_t i;
   13256 	uint8_t record_type, word_address;
   13257 
   13258 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13259 		device_xname(sc->sc_dev), __func__));
   13260 
   13261 	for (i = 0; i < INVM_SIZE; i++) {
   13262 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13263 		/* Get record type */
   13264 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13265 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13266 			break;
   13267 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13268 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13269 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13270 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13271 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13272 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13273 			if (word_address == address) {
   13274 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13275 				rv = 0;
   13276 				break;
   13277 			}
   13278 		}
   13279 	}
   13280 
   13281 	return rv;
   13282 }
   13283 
   13284 static int
   13285 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13286 {
   13287 	int rv = 0;
   13288 	int i;
   13289 
   13290 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13291 		device_xname(sc->sc_dev), __func__));
   13292 
   13293 	if (sc->nvm.acquire(sc) != 0)
   13294 		return -1;
   13295 
   13296 	for (i = 0; i < words; i++) {
   13297 		switch (offset + i) {
   13298 		case NVM_OFF_MACADDR:
   13299 		case NVM_OFF_MACADDR1:
   13300 		case NVM_OFF_MACADDR2:
   13301 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13302 			if (rv != 0) {
   13303 				data[i] = 0xffff;
   13304 				rv = -1;
   13305 			}
   13306 			break;
   13307 		case NVM_OFF_CFG2:
   13308 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13309 			if (rv != 0) {
   13310 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13311 				rv = 0;
   13312 			}
   13313 			break;
   13314 		case NVM_OFF_CFG4:
   13315 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13316 			if (rv != 0) {
   13317 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13318 				rv = 0;
   13319 			}
   13320 			break;
   13321 		case NVM_OFF_LED_1_CFG:
   13322 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13323 			if (rv != 0) {
   13324 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13325 				rv = 0;
   13326 			}
   13327 			break;
   13328 		case NVM_OFF_LED_0_2_CFG:
   13329 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13330 			if (rv != 0) {
   13331 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13332 				rv = 0;
   13333 			}
   13334 			break;
   13335 		case NVM_OFF_ID_LED_SETTINGS:
   13336 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13337 			if (rv != 0) {
   13338 				*data = ID_LED_RESERVED_FFFF;
   13339 				rv = 0;
   13340 			}
   13341 			break;
   13342 		default:
   13343 			DPRINTF(WM_DEBUG_NVM,
   13344 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13345 			*data = NVM_RESERVED_WORD;
   13346 			break;
   13347 		}
   13348 	}
   13349 
   13350 	sc->nvm.release(sc);
   13351 	return rv;
   13352 }
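
/*
 * Illustrative behaviour of the fallbacks above: asking for
 * NVM_OFF_LED_1_CFG on an I211 whose iNVM carries no such record makes
 * wm_nvm_read_word_invm() fail, so the documented default
 * (NVM_LED_1_CFG_DEFAULT_I211) is returned with rv = 0 instead.
 */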
   13353 
/* Locking, NVM type detection, checksum validation, version check and read */
   13355 
   13356 static int
   13357 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13358 {
   13359 	uint32_t eecd = 0;
   13360 
   13361 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13362 	    || sc->sc_type == WM_T_82583) {
   13363 		eecd = CSR_READ(sc, WMREG_EECD);
   13364 
   13365 		/* Isolate bits 15 & 16 */
   13366 		eecd = ((eecd >> 15) & 0x03);
   13367 
   13368 		/* If both bits are set, device is Flash type */
   13369 		if (eecd == 0x03)
   13370 			return 0;
   13371 	}
   13372 	return 1;
   13373 }
   13374 
   13375 static int
   13376 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13377 {
   13378 	uint32_t eec;
   13379 
   13380 	eec = CSR_READ(sc, WMREG_EEC);
   13381 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13382 		return 1;
   13383 
   13384 	return 0;
   13385 }
   13386 
   13387 /*
   13388  * wm_nvm_validate_checksum
   13389  *
 * The checksum is defined as the sum of the first 64 (16 bit) words,
 * which must equal NVM_CHECKSUM.
   13391  */
   13392 static int
   13393 wm_nvm_validate_checksum(struct wm_softc *sc)
   13394 {
   13395 	uint16_t checksum;
   13396 	uint16_t eeprom_data;
   13397 #ifdef WM_DEBUG
   13398 	uint16_t csum_wordaddr, valid_checksum;
   13399 #endif
   13400 	int i;
   13401 
   13402 	checksum = 0;
   13403 
   13404 	/* Don't check for I211 */
   13405 	if (sc->sc_type == WM_T_I211)
   13406 		return 0;
   13407 
   13408 #ifdef WM_DEBUG
   13409 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13410 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13411 		csum_wordaddr = NVM_OFF_COMPAT;
   13412 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13413 	} else {
   13414 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13415 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13416 	}
   13417 
   13418 	/* Dump EEPROM image for debug */
   13419 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13420 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13421 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13422 		/* XXX PCH_SPT? */
   13423 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13424 		if ((eeprom_data & valid_checksum) == 0)
   13425 			DPRINTF(WM_DEBUG_NVM,
   13426 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13427 				device_xname(sc->sc_dev), eeprom_data,
   13428 				    valid_checksum));
   13429 	}
   13430 
   13431 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13432 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13433 		for (i = 0; i < NVM_SIZE; i++) {
   13434 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13435 				printf("XXXX ");
   13436 			else
   13437 				printf("%04hx ", eeprom_data);
   13438 			if (i % 8 == 7)
   13439 				printf("\n");
   13440 		}
   13441 	}
   13442 
   13443 #endif /* WM_DEBUG */
   13444 
   13445 	for (i = 0; i < NVM_SIZE; i++) {
   13446 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13447 			return 1;
   13448 		checksum += eeprom_data;
   13449 	}
   13450 
   13451 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13452 #ifdef WM_DEBUG
   13453 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13454 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13455 #endif
   13456 	}
   13457 
   13458 	return 0;
   13459 }
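
/*
 * Numeric example of the checksum rule (illustrative, assuming the usual
 * NVM_CHECKSUM value of 0xbaba): if words 0x00-0x3e sum to 0xb98a, the
 * checksum word 0x3f must hold 0xbaba - 0xb98a = 0x0130 so that the
 * 16-bit sum of all NVM_SIZE words equals NVM_CHECKSUM.
 */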
   13460 
   13461 static void
   13462 wm_nvm_version_invm(struct wm_softc *sc)
   13463 {
   13464 	uint32_t dword;
   13465 
   13466 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document says.
   13469 	 * Perhaps it's not perfect though...
   13470 	 *
   13471 	 * Example:
   13472 	 *
   13473 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13474 	 */
   13475 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13476 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13477 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13478 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13479 }
   13480 
   13481 static void
   13482 wm_nvm_version(struct wm_softc *sc)
   13483 {
   13484 	uint16_t major, minor, build, patch;
   13485 	uint16_t uid0, uid1;
   13486 	uint16_t nvm_data;
   13487 	uint16_t off;
   13488 	bool check_version = false;
   13489 	bool check_optionrom = false;
   13490 	bool have_build = false;
   13491 	bool have_uid = true;
   13492 
   13493 	/*
   13494 	 * Version format:
   13495 	 *
   13496 	 * XYYZ
   13497 	 * X0YZ
   13498 	 * X0YY
   13499 	 *
   13500 	 * Example:
   13501 	 *
   13502 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13503 	 *	82571	0x50a6	5.10.6?
   13504 	 *	82572	0x506a	5.6.10?
   13505 	 *	82572EI	0x5069	5.6.9?
   13506 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13507 	 *		0x2013	2.1.3?
   13508 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13509 	 * ICH8+82567	0x0040	0.4.0?
   13510 	 * ICH9+82566	0x1040	1.4.0?
   13511 	 *ICH10+82567	0x0043	0.4.3?
   13512 	 *  PCH+82577	0x00c1	0.12.1?
   13513 	 * PCH2+82579	0x00d3	0.13.3?
   13514 	 *		0x00d4	0.13.4?
   13515 	 *  LPT+I218	0x0023	0.2.3?
   13516 	 *  SPT+I219	0x0084	0.8.4?
   13517 	 *  CNP+I219	0x0054	0.5.4?
   13518 	 */
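
	/*
	 * Worked example of the decode below (illustrative): nvm_data
	 * 0x50a2 gives major = 5, minor = 0x0a, build = 2; the BCD
	 * conversion turns minor 0x0a into decimal 10, so it prints as
	 * "5.10.2".
	 */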
   13519 
   13520 	/*
   13521 	 * XXX
   13522 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13524 	 */
   13525 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13526 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13527 		have_uid = false;
   13528 
   13529 	switch (sc->sc_type) {
   13530 	case WM_T_82571:
   13531 	case WM_T_82572:
   13532 	case WM_T_82574:
   13533 	case WM_T_82583:
   13534 		check_version = true;
   13535 		check_optionrom = true;
   13536 		have_build = true;
   13537 		break;
   13538 	case WM_T_ICH8:
   13539 	case WM_T_ICH9:
   13540 	case WM_T_ICH10:
   13541 	case WM_T_PCH:
   13542 	case WM_T_PCH2:
   13543 	case WM_T_PCH_LPT:
   13544 	case WM_T_PCH_SPT:
   13545 	case WM_T_PCH_CNP:
   13546 		check_version = true;
   13547 		have_build = true;
   13548 		have_uid = false;
   13549 		break;
   13550 	case WM_T_82575:
   13551 	case WM_T_82576:
   13552 	case WM_T_82580:
   13553 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13554 			check_version = true;
   13555 		break;
   13556 	case WM_T_I211:
   13557 		wm_nvm_version_invm(sc);
   13558 		have_uid = false;
   13559 		goto printver;
   13560 	case WM_T_I210:
   13561 		if (!wm_nvm_flash_presence_i210(sc)) {
   13562 			wm_nvm_version_invm(sc);
   13563 			have_uid = false;
   13564 			goto printver;
   13565 		}
   13566 		/* FALLTHROUGH */
   13567 	case WM_T_I350:
   13568 	case WM_T_I354:
   13569 		check_version = true;
   13570 		check_optionrom = true;
   13571 		break;
   13572 	default:
   13573 		return;
   13574 	}
   13575 	if (check_version
   13576 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13577 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13578 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13579 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13580 			build = nvm_data & NVM_BUILD_MASK;
   13581 			have_build = true;
   13582 		} else
   13583 			minor = nvm_data & 0x00ff;
   13584 
   13585 		/* Decimal */
   13586 		minor = (minor / 16) * 10 + (minor % 16);
   13587 		sc->sc_nvm_ver_major = major;
   13588 		sc->sc_nvm_ver_minor = minor;
   13589 
   13590 printver:
   13591 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13592 		    sc->sc_nvm_ver_minor);
   13593 		if (have_build) {
   13594 			sc->sc_nvm_ver_build = build;
   13595 			aprint_verbose(".%d", build);
   13596 		}
   13597 	}
   13598 
	/* Assume the Option ROM area is above NVM_SIZE */
   13600 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13601 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13602 		/* Option ROM Version */
   13603 		if ((off != 0x0000) && (off != 0xffff)) {
   13604 			int rv;
   13605 
   13606 			off += NVM_COMBO_VER_OFF;
   13607 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13608 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13609 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13610 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13611 				/* 16bits */
   13612 				major = uid0 >> 8;
   13613 				build = (uid0 << 8) | (uid1 >> 8);
   13614 				patch = uid1 & 0x00ff;
   13615 				aprint_verbose(", option ROM Version %d.%d.%d",
   13616 				    major, build, patch);
   13617 			}
   13618 		}
   13619 	}
   13620 
   13621 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13622 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13623 }
   13624 
   13625 /*
   13626  * wm_nvm_read:
   13627  *
   13628  *	Read data from the serial EEPROM.
   13629  */
   13630 static int
   13631 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13632 {
   13633 	int rv;
   13634 
   13635 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13636 		device_xname(sc->sc_dev), __func__));
   13637 
   13638 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13639 		return -1;
   13640 
   13641 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13642 
   13643 	return rv;
   13644 }
   13645 
   13646 /*
   13647  * Hardware semaphores.
 * Very complex...
   13649  */
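
/*
 * The general pattern (a sketch, mirroring what callers such as
 * wm_nvm_read_ich8() actually do): bracket the access with the
 * acquire/release pair installed in sc->nvm or sc->phy,
 *
 *	if (sc->nvm.acquire(sc) != 0)
 *		return -1;
 *	... access the NVM ...
 *	sc->nvm.release(sc);
 *
 * The helpers below implement those hooks for the various chip families.
 */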
   13650 
   13651 static int
   13652 wm_get_null(struct wm_softc *sc)
   13653 {
   13654 
   13655 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13656 		device_xname(sc->sc_dev), __func__));
   13657 	return 0;
   13658 }
   13659 
   13660 static void
   13661 wm_put_null(struct wm_softc *sc)
   13662 {
   13663 
   13664 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13665 		device_xname(sc->sc_dev), __func__));
   13666 	return;
   13667 }
   13668 
   13669 static int
   13670 wm_get_eecd(struct wm_softc *sc)
   13671 {
   13672 	uint32_t reg;
   13673 	int x;
   13674 
   13675 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13676 		device_xname(sc->sc_dev), __func__));
   13677 
   13678 	reg = CSR_READ(sc, WMREG_EECD);
   13679 
   13680 	/* Request EEPROM access. */
   13681 	reg |= EECD_EE_REQ;
   13682 	CSR_WRITE(sc, WMREG_EECD, reg);
   13683 
	/* ... and wait for it to be granted. */
   13685 	for (x = 0; x < 1000; x++) {
   13686 		reg = CSR_READ(sc, WMREG_EECD);
   13687 		if (reg & EECD_EE_GNT)
   13688 			break;
   13689 		delay(5);
   13690 	}
   13691 	if ((reg & EECD_EE_GNT) == 0) {
   13692 		aprint_error_dev(sc->sc_dev,
   13693 		    "could not acquire EEPROM GNT\n");
   13694 		reg &= ~EECD_EE_REQ;
   13695 		CSR_WRITE(sc, WMREG_EECD, reg);
   13696 		return -1;
   13697 	}
   13698 
   13699 	return 0;
   13700 }
   13701 
   13702 static void
   13703 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13704 {
   13705 
   13706 	*eecd |= EECD_SK;
   13707 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13708 	CSR_WRITE_FLUSH(sc);
   13709 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13710 		delay(1);
   13711 	else
   13712 		delay(50);
   13713 }
   13714 
   13715 static void
   13716 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13717 {
   13718 
   13719 	*eecd &= ~EECD_SK;
   13720 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13721 	CSR_WRITE_FLUSH(sc);
   13722 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13723 		delay(1);
   13724 	else
   13725 		delay(50);
   13726 }
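
/*
 * Illustrative bit-bang step (a sketch, not code from this driver): a
 * caller shifts one bit in or out between a raise/lower pair, e.g.
 *
 *	wm_nvm_eec_clock_raise(sc, &reg);
 *	bit = (CSR_READ(sc, WMREG_EECD) & EECD_DO) != 0;
 *	wm_nvm_eec_clock_lower(sc, &reg);
 */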
   13727 
   13728 static void
   13729 wm_put_eecd(struct wm_softc *sc)
   13730 {
   13731 	uint32_t reg;
   13732 
   13733 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13734 		device_xname(sc->sc_dev), __func__));
   13735 
   13736 	/* Stop nvm */
   13737 	reg = CSR_READ(sc, WMREG_EECD);
   13738 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13739 		/* Pull CS high */
   13740 		reg |= EECD_CS;
   13741 		wm_nvm_eec_clock_lower(sc, &reg);
   13742 	} else {
   13743 		/* CS on Microwire is active-high */
   13744 		reg &= ~(EECD_CS | EECD_DI);
   13745 		CSR_WRITE(sc, WMREG_EECD, reg);
   13746 		wm_nvm_eec_clock_raise(sc, &reg);
   13747 		wm_nvm_eec_clock_lower(sc, &reg);
   13748 	}
   13749 
   13750 	reg = CSR_READ(sc, WMREG_EECD);
   13751 	reg &= ~EECD_EE_REQ;
   13752 	CSR_WRITE(sc, WMREG_EECD, reg);
   13753 
   13754 	return;
   13755 }
   13756 
   13757 /*
   13758  * Get hardware semaphore.
   13759  * Same as e1000_get_hw_semaphore_generic()
   13760  */
   13761 static int
   13762 wm_get_swsm_semaphore(struct wm_softc *sc)
   13763 {
   13764 	int32_t timeout;
   13765 	uint32_t swsm;
   13766 
   13767 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13768 		device_xname(sc->sc_dev), __func__));
   13769 	KASSERT(sc->sc_nvm_wordsize > 0);
   13770 
   13771 retry:
   13772 	/* Get the SW semaphore. */
   13773 	timeout = sc->sc_nvm_wordsize + 1;
   13774 	while (timeout) {
   13775 		swsm = CSR_READ(sc, WMREG_SWSM);
   13776 
   13777 		if ((swsm & SWSM_SMBI) == 0)
   13778 			break;
   13779 
   13780 		delay(50);
   13781 		timeout--;
   13782 	}
   13783 
   13784 	if (timeout == 0) {
   13785 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13786 			/*
   13787 			 * In rare circumstances, the SW semaphore may already
   13788 			 * be held unintentionally. Clear the semaphore once
   13789 			 * before giving up.
   13790 			 */
   13791 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13792 			wm_put_swsm_semaphore(sc);
   13793 			goto retry;
   13794 		}
   13795 		aprint_error_dev(sc->sc_dev,
   13796 		    "could not acquire SWSM SMBI\n");
   13797 		return 1;
   13798 	}
   13799 
   13800 	/* Get the FW semaphore. */
   13801 	timeout = sc->sc_nvm_wordsize + 1;
   13802 	while (timeout) {
   13803 		swsm = CSR_READ(sc, WMREG_SWSM);
   13804 		swsm |= SWSM_SWESMBI;
   13805 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13806 		/* If we managed to set the bit we got the semaphore. */
   13807 		swsm = CSR_READ(sc, WMREG_SWSM);
   13808 		if (swsm & SWSM_SWESMBI)
   13809 			break;
   13810 
   13811 		delay(50);
   13812 		timeout--;
   13813 	}
   13814 
   13815 	if (timeout == 0) {
   13816 		aprint_error_dev(sc->sc_dev,
   13817 		    "could not acquire SWSM SWESMBI\n");
   13818 		/* Release semaphores */
   13819 		wm_put_swsm_semaphore(sc);
   13820 		return 1;
   13821 	}
   13822 	return 0;
   13823 }
   13824 
   13825 /*
   13826  * Put hardware semaphore.
   13827  * Same as e1000_put_hw_semaphore_generic()
   13828  */
   13829 static void
   13830 wm_put_swsm_semaphore(struct wm_softc *sc)
   13831 {
   13832 	uint32_t swsm;
   13833 
   13834 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13835 		device_xname(sc->sc_dev), __func__));
   13836 
   13837 	swsm = CSR_READ(sc, WMREG_SWSM);
   13838 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13839 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13840 }
   13841 
   13842 /*
   13843  * Get SW/FW semaphore.
   13844  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13845  */
   13846 static int
   13847 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13848 {
   13849 	uint32_t swfw_sync;
   13850 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13851 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13852 	int timeout;
   13853 
   13854 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13855 		device_xname(sc->sc_dev), __func__));
   13856 
   13857 	if (sc->sc_type == WM_T_80003)
   13858 		timeout = 50;
   13859 	else
   13860 		timeout = 200;
   13861 
   13862 	while (timeout) {
   13863 		if (wm_get_swsm_semaphore(sc)) {
   13864 			aprint_error_dev(sc->sc_dev,
   13865 			    "%s: failed to get semaphore\n",
   13866 			    __func__);
   13867 			return 1;
   13868 		}
   13869 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13870 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13871 			swfw_sync |= swmask;
   13872 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13873 			wm_put_swsm_semaphore(sc);
   13874 			return 0;
   13875 		}
   13876 		wm_put_swsm_semaphore(sc);
   13877 		delay(5000);
   13878 		timeout--;
   13879 	}
   13880 	device_printf(sc->sc_dev,
   13881 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13882 	    mask, swfw_sync);
   13883 	return 1;
   13884 }
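
/*
 * Example of the mask arithmetic above (bit positions assumed from
 * wmreg.h, for illustration only): with SWFW_SOFT_SHIFT = 0 and
 * SWFW_FIRM_SHIFT = 16, a mask of 0x0002 claims software bit 0x00000002
 * and watches firmware bit 0x00020000; the resource is free only when
 * SW_FW_SYNC has neither bit set.
 */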
   13885 
   13886 static void
   13887 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13888 {
   13889 	uint32_t swfw_sync;
   13890 
   13891 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13892 		device_xname(sc->sc_dev), __func__));
   13893 
   13894 	while (wm_get_swsm_semaphore(sc) != 0)
   13895 		continue;
   13896 
   13897 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13898 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13899 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13900 
   13901 	wm_put_swsm_semaphore(sc);
   13902 }
   13903 
   13904 static int
   13905 wm_get_nvm_80003(struct wm_softc *sc)
   13906 {
   13907 	int rv;
   13908 
   13909 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13910 		device_xname(sc->sc_dev), __func__));
   13911 
   13912 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13913 		aprint_error_dev(sc->sc_dev,
   13914 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   13915 		return rv;
   13916 	}
   13917 
   13918 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13919 	    && (rv = wm_get_eecd(sc)) != 0) {
   13920 		aprint_error_dev(sc->sc_dev,
   13921 		    "%s: failed to get semaphore(EECD)\n", __func__);
   13922 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13923 		return rv;
   13924 	}
   13925 
   13926 	return 0;
   13927 }
   13928 
   13929 static void
   13930 wm_put_nvm_80003(struct wm_softc *sc)
   13931 {
   13932 
   13933 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13934 		device_xname(sc->sc_dev), __func__));
   13935 
   13936 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13937 		wm_put_eecd(sc);
   13938 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13939 }
   13940 
   13941 static int
   13942 wm_get_nvm_82571(struct wm_softc *sc)
   13943 {
   13944 	int rv;
   13945 
   13946 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13947 		device_xname(sc->sc_dev), __func__));
   13948 
   13949 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13950 		return rv;
   13951 
   13952 	switch (sc->sc_type) {
   13953 	case WM_T_82573:
   13954 		break;
   13955 	default:
   13956 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13957 			rv = wm_get_eecd(sc);
   13958 		break;
   13959 	}
   13960 
   13961 	if (rv != 0) {
   13962 		aprint_error_dev(sc->sc_dev,
   13963 		    "%s: failed to get semaphore\n",
   13964 		    __func__);
   13965 		wm_put_swsm_semaphore(sc);
   13966 	}
   13967 
   13968 	return rv;
   13969 }
   13970 
   13971 static void
   13972 wm_put_nvm_82571(struct wm_softc *sc)
   13973 {
   13974 
   13975 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13976 		device_xname(sc->sc_dev), __func__));
   13977 
   13978 	switch (sc->sc_type) {
   13979 	case WM_T_82573:
   13980 		break;
   13981 	default:
   13982 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13983 			wm_put_eecd(sc);
   13984 		break;
   13985 	}
   13986 
   13987 	wm_put_swsm_semaphore(sc);
   13988 }
   13989 
   13990 static int
   13991 wm_get_phy_82575(struct wm_softc *sc)
   13992 {
   13993 
   13994 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13995 		device_xname(sc->sc_dev), __func__));
   13996 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13997 }
   13998 
   13999 static void
   14000 wm_put_phy_82575(struct wm_softc *sc)
   14001 {
   14002 
   14003 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14004 		device_xname(sc->sc_dev), __func__));
   14005 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14006 }
   14007 
   14008 static int
   14009 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14010 {
   14011 	uint32_t ext_ctrl;
	int timeout;
   14013 
   14014 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14015 		device_xname(sc->sc_dev), __func__));
   14016 
   14017 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14018 	for (timeout = 0; timeout < 200; timeout++) {
   14019 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14020 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14021 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14022 
   14023 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14024 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14025 			return 0;
   14026 		delay(5000);
   14027 	}
   14028 	device_printf(sc->sc_dev,
   14029 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14030 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14031 	return 1;
   14032 }
   14033 
   14034 static void
   14035 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14036 {
   14037 	uint32_t ext_ctrl;
   14038 
   14039 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14040 		device_xname(sc->sc_dev), __func__));
   14041 
   14042 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14043 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14044 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14045 
   14046 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14047 }
   14048 
   14049 static int
   14050 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14051 {
   14052 	uint32_t ext_ctrl;
   14053 	int timeout;
   14054 
   14055 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14056 		device_xname(sc->sc_dev), __func__));
   14057 	mutex_enter(sc->sc_ich_phymtx);
   14058 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14059 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14060 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14061 			break;
   14062 		delay(1000);
   14063 	}
   14064 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14065 		device_printf(sc->sc_dev,
   14066 		    "SW has already locked the resource\n");
   14067 		goto out;
   14068 	}
   14069 
   14070 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14071 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14072 	for (timeout = 0; timeout < 1000; timeout++) {
   14073 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14074 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14075 			break;
   14076 		delay(1000);
   14077 	}
   14078 	if (timeout >= 1000) {
   14079 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14080 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14081 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14082 		goto out;
   14083 	}
   14084 	return 0;
   14085 
   14086 out:
   14087 	mutex_exit(sc->sc_ich_phymtx);
   14088 	return 1;
   14089 }
   14090 
   14091 static void
   14092 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14093 {
   14094 	uint32_t ext_ctrl;
   14095 
   14096 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14097 		device_xname(sc->sc_dev), __func__));
   14098 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14099 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14100 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14101 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14102 	} else {
   14103 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14104 	}
   14105 
   14106 	mutex_exit(sc->sc_ich_phymtx);
   14107 }
   14108 
   14109 static int
   14110 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14111 {
   14112 
   14113 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14114 		device_xname(sc->sc_dev), __func__));
   14115 	mutex_enter(sc->sc_ich_nvmmtx);
   14116 
   14117 	return 0;
   14118 }
   14119 
   14120 static void
   14121 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14122 {
   14123 
   14124 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14125 		device_xname(sc->sc_dev), __func__));
   14126 	mutex_exit(sc->sc_ich_nvmmtx);
   14127 }
   14128 
   14129 static int
   14130 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14131 {
   14132 	int i = 0;
   14133 	uint32_t reg;
   14134 
   14135 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14136 		device_xname(sc->sc_dev), __func__));
   14137 
   14138 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14139 	do {
   14140 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14141 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14142 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14143 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14144 			break;
   14145 		delay(2*1000);
   14146 		i++;
   14147 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14148 
   14149 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14150 		wm_put_hw_semaphore_82573(sc);
   14151 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14152 		    device_xname(sc->sc_dev));
   14153 		return -1;
   14154 	}
   14155 
   14156 	return 0;
   14157 }
   14158 
   14159 static void
   14160 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14161 {
   14162 	uint32_t reg;
   14163 
   14164 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14165 		device_xname(sc->sc_dev), __func__));
   14166 
   14167 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14168 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14169 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14170 }
   14171 
   14172 /*
   14173  * Management mode and power management related subroutines.
   14174  * BMC, AMT, suspend/resume and EEE.
   14175  */
   14176 
   14177 #ifdef WM_WOL
   14178 static int
   14179 wm_check_mng_mode(struct wm_softc *sc)
   14180 {
   14181 	int rv;
   14182 
   14183 	switch (sc->sc_type) {
   14184 	case WM_T_ICH8:
   14185 	case WM_T_ICH9:
   14186 	case WM_T_ICH10:
   14187 	case WM_T_PCH:
   14188 	case WM_T_PCH2:
   14189 	case WM_T_PCH_LPT:
   14190 	case WM_T_PCH_SPT:
   14191 	case WM_T_PCH_CNP:
   14192 		rv = wm_check_mng_mode_ich8lan(sc);
   14193 		break;
   14194 	case WM_T_82574:
   14195 	case WM_T_82583:
   14196 		rv = wm_check_mng_mode_82574(sc);
   14197 		break;
   14198 	case WM_T_82571:
   14199 	case WM_T_82572:
   14200 	case WM_T_82573:
   14201 	case WM_T_80003:
   14202 		rv = wm_check_mng_mode_generic(sc);
   14203 		break;
   14204 	default:
		/* Nothing to do */
   14206 		rv = 0;
   14207 		break;
   14208 	}
   14209 
   14210 	return rv;
   14211 }
   14212 
   14213 static int
   14214 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14215 {
   14216 	uint32_t fwsm;
   14217 
   14218 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14219 
   14220 	if (((fwsm & FWSM_FW_VALID) != 0)
   14221 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14222 		return 1;
   14223 
   14224 	return 0;
   14225 }
   14226 
   14227 static int
   14228 wm_check_mng_mode_82574(struct wm_softc *sc)
   14229 {
   14230 	uint16_t data;
   14231 
   14232 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14233 
   14234 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14235 		return 1;
   14236 
   14237 	return 0;
   14238 }
   14239 
   14240 static int
   14241 wm_check_mng_mode_generic(struct wm_softc *sc)
   14242 {
   14243 	uint32_t fwsm;
   14244 
   14245 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14246 
   14247 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14248 		return 1;
   14249 
   14250 	return 0;
   14251 }
   14252 #endif /* WM_WOL */
   14253 
   14254 static int
   14255 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14256 {
   14257 	uint32_t manc, fwsm, factps;
   14258 
   14259 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14260 		return 0;
   14261 
   14262 	manc = CSR_READ(sc, WMREG_MANC);
   14263 
   14264 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14265 		device_xname(sc->sc_dev), manc));
   14266 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14267 		return 0;
   14268 
   14269 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14270 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14271 		factps = CSR_READ(sc, WMREG_FACTPS);
   14272 		if (((factps & FACTPS_MNGCG) == 0)
   14273 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14274 			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   14276 		uint16_t data;
   14277 
   14278 		factps = CSR_READ(sc, WMREG_FACTPS);
   14279 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14280 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14281 			device_xname(sc->sc_dev), factps, data));
   14282 		if (((factps & FACTPS_MNGCG) == 0)
   14283 		    && ((data & NVM_CFG2_MNGM_MASK)
   14284 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14285 			return 1;
   14286 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14287 	    && ((manc & MANC_ASF_EN) == 0))
   14288 		return 1;
   14289 
   14290 	return 0;
   14291 }
   14292 
   14293 static bool
   14294 wm_phy_resetisblocked(struct wm_softc *sc)
   14295 {
   14296 	bool blocked = false;
   14297 	uint32_t reg;
   14298 	int i = 0;
   14299 
   14300 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14301 		device_xname(sc->sc_dev), __func__));
   14302 
   14303 	switch (sc->sc_type) {
   14304 	case WM_T_ICH8:
   14305 	case WM_T_ICH9:
   14306 	case WM_T_ICH10:
   14307 	case WM_T_PCH:
   14308 	case WM_T_PCH2:
   14309 	case WM_T_PCH_LPT:
   14310 	case WM_T_PCH_SPT:
   14311 	case WM_T_PCH_CNP:
   14312 		do {
   14313 			reg = CSR_READ(sc, WMREG_FWSM);
   14314 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14315 				blocked = true;
   14316 				delay(10*1000);
   14317 				continue;
   14318 			}
   14319 			blocked = false;
   14320 		} while (blocked && (i++ < 30));
		return blocked;
   14323 	case WM_T_82571:
   14324 	case WM_T_82572:
   14325 	case WM_T_82573:
   14326 	case WM_T_82574:
   14327 	case WM_T_82583:
   14328 	case WM_T_80003:
   14329 		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   14335 	default:
   14336 		/* No problem */
   14337 		break;
   14338 	}
   14339 
   14340 	return false;
   14341 }
   14342 
   14343 static void
   14344 wm_get_hw_control(struct wm_softc *sc)
   14345 {
   14346 	uint32_t reg;
   14347 
   14348 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14349 		device_xname(sc->sc_dev), __func__));
   14350 
   14351 	if (sc->sc_type == WM_T_82573) {
   14352 		reg = CSR_READ(sc, WMREG_SWSM);
   14353 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14354 	} else if (sc->sc_type >= WM_T_82571) {
   14355 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14356 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14357 	}
   14358 }
   14359 
   14360 static void
   14361 wm_release_hw_control(struct wm_softc *sc)
   14362 {
   14363 	uint32_t reg;
   14364 
   14365 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14366 		device_xname(sc->sc_dev), __func__));
   14367 
   14368 	if (sc->sc_type == WM_T_82573) {
   14369 		reg = CSR_READ(sc, WMREG_SWSM);
   14370 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14371 	} else if (sc->sc_type >= WM_T_82571) {
   14372 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14373 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14374 	}
   14375 }
   14376 
   14377 static void
   14378 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14379 {
   14380 	uint32_t reg;
   14381 
   14382 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14383 		device_xname(sc->sc_dev), __func__));
   14384 
   14385 	if (sc->sc_type < WM_T_PCH2)
   14386 		return;
   14387 
   14388 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14389 
   14390 	if (gate)
   14391 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14392 	else
   14393 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14394 
   14395 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14396 }
   14397 
   14398 static int
   14399 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14400 {
   14401 	uint32_t fwsm, reg;
   14402 	int rv = 0;
   14403 
   14404 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14405 		device_xname(sc->sc_dev), __func__));
   14406 
   14407 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14408 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14409 
   14410 	/* Disable ULP */
   14411 	wm_ulp_disable(sc);
   14412 
   14413 	/* Acquire PHY semaphore */
   14414 	rv = sc->phy.acquire(sc);
   14415 	if (rv != 0) {
   14416 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14417 		device_xname(sc->sc_dev), __func__));
   14418 		return -1;
   14419 	}
   14420 
   14421 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14422 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14423 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14424 	 */
   14425 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14426 	switch (sc->sc_type) {
   14427 	case WM_T_PCH_LPT:
   14428 	case WM_T_PCH_SPT:
   14429 	case WM_T_PCH_CNP:
   14430 		if (wm_phy_is_accessible_pchlan(sc))
   14431 			break;
   14432 
   14433 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14434 		 * forcing MAC to SMBus mode first.
   14435 		 */
   14436 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14437 		reg |= CTRL_EXT_FORCE_SMBUS;
   14438 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14439 #if 0
   14440 		/* XXX Isn't this required??? */
   14441 		CSR_WRITE_FLUSH(sc);
   14442 #endif
   14443 		/* Wait 50 milliseconds for MAC to finish any retries
   14444 		 * that it might be trying to perform from previous
   14445 		 * attempts to acknowledge any phy read requests.
   14446 		 */
   14447 		delay(50 * 1000);
   14448 		/* FALLTHROUGH */
   14449 	case WM_T_PCH2:
   14450 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14451 			break;
   14452 		/* FALLTHROUGH */
   14453 	case WM_T_PCH:
   14454 		if (sc->sc_type == WM_T_PCH)
   14455 			if ((fwsm & FWSM_FW_VALID) != 0)
   14456 				break;
   14457 
   14458 		if (wm_phy_resetisblocked(sc) == true) {
   14459 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14460 			break;
   14461 		}
   14462 
   14463 		/* Toggle LANPHYPC Value bit */
   14464 		wm_toggle_lanphypc_pch_lpt(sc);
   14465 
   14466 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14467 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14468 				break;
   14469 
   14470 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14471 			 * so ensure that the MAC is also out of SMBus mode
   14472 			 */
   14473 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14474 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14475 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14476 
   14477 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14478 				break;
   14479 			rv = -1;
   14480 		}
   14481 		break;
   14482 	default:
   14483 		break;
   14484 	}
   14485 
   14486 	/* Release semaphore */
   14487 	sc->phy.release(sc);
   14488 
   14489 	if (rv == 0) {
   14490 		/* Check to see if able to reset PHY.  Print error if not */
   14491 		if (wm_phy_resetisblocked(sc)) {
   14492 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14493 			goto out;
   14494 		}
   14495 
   14496 		/* Reset the PHY before any access to it.  Doing so, ensures
   14497 		 * that the PHY is in a known good state before we read/write
   14498 		 * PHY registers.  The generic reset is sufficient here,
   14499 		 * because we haven't determined the PHY type yet.
   14500 		 */
   14501 		if (wm_reset_phy(sc) != 0)
   14502 			goto out;
   14503 
   14504 		/* On a successful reset, possibly need to wait for the PHY
   14505 		 * to quiesce to an accessible state before returning control
   14506 		 * to the calling function.  If the PHY does not quiesce, then
   14507 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
   14509 		 */
   14510 		if (wm_phy_resetisblocked(sc))
   14511 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14512 	}
   14513 
   14514 out:
   14515 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14516 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14517 		delay(10*1000);
   14518 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14519 	}
   14520 
   14521 	return 0;
   14522 }
   14523 
   14524 static void
   14525 wm_init_manageability(struct wm_softc *sc)
   14526 {
   14527 
   14528 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14529 		device_xname(sc->sc_dev), __func__));
   14530 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14531 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14532 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14533 
   14534 		/* Disable hardware interception of ARP */
   14535 		manc &= ~MANC_ARP_EN;
   14536 
   14537 		/* Enable receiving management packets to the host */
   14538 		if (sc->sc_type >= WM_T_82571) {
   14539 			manc |= MANC_EN_MNG2HOST;
   14540 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14541 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14542 		}
   14543 
   14544 		CSR_WRITE(sc, WMREG_MANC, manc);
   14545 	}
   14546 }
   14547 
   14548 static void
   14549 wm_release_manageability(struct wm_softc *sc)
   14550 {
   14551 
   14552 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14553 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14554 
   14555 		manc |= MANC_ARP_EN;
   14556 		if (sc->sc_type >= WM_T_82571)
   14557 			manc &= ~MANC_EN_MNG2HOST;
   14558 
   14559 		CSR_WRITE(sc, WMREG_MANC, manc);
   14560 	}
   14561 }
   14562 
   14563 static void
   14564 wm_get_wakeup(struct wm_softc *sc)
   14565 {
   14566 
   14567 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14568 	switch (sc->sc_type) {
   14569 	case WM_T_82573:
   14570 	case WM_T_82583:
   14571 		sc->sc_flags |= WM_F_HAS_AMT;
   14572 		/* FALLTHROUGH */
   14573 	case WM_T_80003:
   14574 	case WM_T_82575:
   14575 	case WM_T_82576:
   14576 	case WM_T_82580:
   14577 	case WM_T_I350:
   14578 	case WM_T_I354:
   14579 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14580 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14581 		/* FALLTHROUGH */
   14582 	case WM_T_82541:
   14583 	case WM_T_82541_2:
   14584 	case WM_T_82547:
   14585 	case WM_T_82547_2:
   14586 	case WM_T_82571:
   14587 	case WM_T_82572:
   14588 	case WM_T_82574:
   14589 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14590 		break;
   14591 	case WM_T_ICH8:
   14592 	case WM_T_ICH9:
   14593 	case WM_T_ICH10:
   14594 	case WM_T_PCH:
   14595 	case WM_T_PCH2:
   14596 	case WM_T_PCH_LPT:
   14597 	case WM_T_PCH_SPT:
   14598 	case WM_T_PCH_CNP:
   14599 		sc->sc_flags |= WM_F_HAS_AMT;
   14600 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14601 		break;
   14602 	default:
   14603 		break;
   14604 	}
   14605 
   14606 	/* 1: HAS_MANAGE */
   14607 	if (wm_enable_mng_pass_thru(sc) != 0)
   14608 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14609 
   14610 	/*
	 * Note that the WOL flags are set after the resetting of the EEPROM
	 * stuff.
   14613 	 */
   14614 }
   14615 
   14616 /*
   14617  * Unconfigure Ultra Low Power mode.
   14618  * Only for I217 and newer (see below).
   14619  */
   14620 static int
   14621 wm_ulp_disable(struct wm_softc *sc)
   14622 {
   14623 	uint32_t reg;
   14624 	uint16_t phyreg;
   14625 	int i = 0, rv = 0;
   14626 
   14627 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14628 		device_xname(sc->sc_dev), __func__));
   14629 	/* Exclude old devices */
   14630 	if ((sc->sc_type < WM_T_PCH_LPT)
   14631 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14632 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14633 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14634 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14635 		return 0;
   14636 
   14637 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14638 		/* Request ME un-configure ULP mode in the PHY */
   14639 		reg = CSR_READ(sc, WMREG_H2ME);
   14640 		reg &= ~H2ME_ULP;
   14641 		reg |= H2ME_ENFORCE_SETTINGS;
   14642 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14643 
   14644 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14645 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14646 			if (i++ == 30) {
   14647 				device_printf(sc->sc_dev, "%s timed out\n",
   14648 				    __func__);
   14649 				return -1;
   14650 			}
   14651 			delay(10 * 1000);
   14652 		}
   14653 		reg = CSR_READ(sc, WMREG_H2ME);
   14654 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14655 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14656 
   14657 		return 0;
   14658 	}
   14659 
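	/*
	 * No manageability firmware is present, so the driver itself must
	 * take the PHY out of ULP: toggle LANPHYPC, unforce SMBus mode in
	 * the PHY and the MAC, and clear the ULP configuration in
	 * I218_ULP_CONFIG1.
	 */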
   14660 	/* Acquire semaphore */
   14661 	rv = sc->phy.acquire(sc);
   14662 	if (rv != 0) {
		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
			device_xname(sc->sc_dev), __func__));
   14665 		return -1;
   14666 	}
   14667 
   14668 	/* Toggle LANPHYPC */
   14669 	wm_toggle_lanphypc_pch_lpt(sc);
   14670 
   14671 	/* Unforce SMBus mode in PHY */
   14672 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14673 	if (rv != 0) {
   14674 		uint32_t reg2;
   14675 
   14676 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14677 			__func__);
   14678 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14679 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14680 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14681 		delay(50 * 1000);
   14682 
   14683 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14684 		    &phyreg);
   14685 		if (rv != 0)
   14686 			goto release;
   14687 	}
   14688 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14689 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14690 
   14691 	/* Unforce SMBus mode in MAC */
   14692 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14693 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14694 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14695 
   14696 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14697 	if (rv != 0)
   14698 		goto release;
   14699 	phyreg |= HV_PM_CTRL_K1_ENA;
   14700 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14701 
   14702 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14703 		&phyreg);
   14704 	if (rv != 0)
   14705 		goto release;
   14706 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14707 	    | I218_ULP_CONFIG1_STICKY_ULP
   14708 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14709 	    | I218_ULP_CONFIG1_WOL_HOST
   14710 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14711 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14712 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14713 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14714 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14715 	phyreg |= I218_ULP_CONFIG1_START;
   14716 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14717 
   14718 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14719 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14720 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14721 
   14722 release:
   14723 	/* Release semaphore */
   14724 	sc->phy.release(sc);
   14725 	wm_gmii_reset(sc);
   14726 	delay(50 * 1000);
   14727 
   14728 	return rv;
   14729 }
   14730 
   14731 /* WOL in the newer chipset interfaces (pchlan) */
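/*
 * On PCH and newer, the PHY itself must be armed for wakeup: the MAC's
 * receive address registers, multicast table and Rx control settings
 * are mirrored into the PHY's wakeup register page (BM_WUC_PAGE), and
 * WUC/WUFC are then programmed in both the MAC and the PHY.
 */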
   14732 static int
   14733 wm_enable_phy_wakeup(struct wm_softc *sc)
   14734 {
   14735 	device_t dev = sc->sc_dev;
   14736 	uint32_t mreg, moff;
   14737 	uint16_t wuce, wuc, wufc, preg;
   14738 	int i, rv;
   14739 
   14740 	KASSERT(sc->sc_type >= WM_T_PCH);
   14741 
   14742 	/* Copy MAC RARs to PHY RARs */
   14743 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14744 
   14745 	/* Activate PHY wakeup */
   14746 	rv = sc->phy.acquire(sc);
   14747 	if (rv != 0) {
   14748 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14749 		    __func__);
   14750 		return rv;
   14751 	}
   14752 
   14753 	/*
   14754 	 * Enable access to PHY wakeup registers.
   14755 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14756 	 */
   14757 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14758 	if (rv != 0) {
   14759 		device_printf(dev,
   14760 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14761 		goto release;
   14762 	}
   14763 
   14764 	/* Copy MAC MTA to PHY MTA */
   14765 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14766 		uint16_t lo, hi;
   14767 
   14768 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14769 		lo = (uint16_t)(mreg & 0xffff);
   14770 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14771 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14772 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14773 	}
   14774 
   14775 	/* Configure PHY Rx Control register */
   14776 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14777 	mreg = CSR_READ(sc, WMREG_RCTL);
   14778 	if (mreg & RCTL_UPE)
   14779 		preg |= BM_RCTL_UPE;
   14780 	if (mreg & RCTL_MPE)
   14781 		preg |= BM_RCTL_MPE;
   14782 	preg &= ~(BM_RCTL_MO_MASK);
   14783 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14784 	if (moff != 0)
   14785 		preg |= moff << BM_RCTL_MO_SHIFT;
   14786 	if (mreg & RCTL_BAM)
   14787 		preg |= BM_RCTL_BAM;
   14788 	if (mreg & RCTL_PMCF)
   14789 		preg |= BM_RCTL_PMCF;
   14790 	mreg = CSR_READ(sc, WMREG_CTRL);
   14791 	if (mreg & CTRL_RFCE)
   14792 		preg |= BM_RCTL_RFCE;
   14793 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14794 
   14795 	wuc = WUC_APME | WUC_PME_EN;
   14796 	wufc = WUFC_MAG;
   14797 	/* Enable PHY wakeup in MAC register */
   14798 	CSR_WRITE(sc, WMREG_WUC,
   14799 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14800 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14801 
   14802 	/* Configure and enable PHY wakeup in PHY registers */
   14803 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14804 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14805 
   14806 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14807 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14808 
   14809 release:
   14810 	sc->phy.release(sc);
   14811 
	return rv;
   14813 }
   14814 
   14815 /* Power down workaround on D3 */
   14816 static void
   14817 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14818 {
   14819 	uint32_t reg;
   14820 	uint16_t phyreg;
   14821 	int i;
   14822 
   14823 	for (i = 0; i < 2; i++) {
   14824 		/* Disable link */
   14825 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14826 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14827 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14828 
   14829 		/*
   14830 		 * Call gig speed drop workaround on Gig disable before
   14831 		 * accessing any PHY registers
   14832 		 */
   14833 		if (sc->sc_type == WM_T_ICH8)
   14834 			wm_gig_downshift_workaround_ich8lan(sc);
   14835 
   14836 		/* Write VR power-down enable */
   14837 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14838 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14839 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14840 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14841 
   14842 		/* Read it back and test */
   14843 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14844 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14845 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14846 			break;
   14847 
   14848 		/* Issue PHY reset and repeat at most one more time */
   14849 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14850 	}
   14851 }
   14852 
   14853 /*
   14854  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14855  *  @sc: pointer to the HW structure
   14856  *
   14857  *  During S0 to Sx transition, it is possible the link remains at gig
   14858  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14859  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14860  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14861  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14862  *  needs to be written.
   14863  *  Parts that support (and are linked to a partner which support) EEE in
   14864  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14865  *  than 10Mbps w/o EEE.
   14866  */
   14867 static void
   14868 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14869 {
   14870 	device_t dev = sc->sc_dev;
   14871 	struct ethercom *ec = &sc->sc_ethercom;
   14872 	uint32_t phy_ctrl;
   14873 	int rv;
   14874 
   14875 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14876 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14877 
   14878 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14879 
   14880 	if (sc->sc_phytype == WMPHY_I217) {
   14881 		uint16_t devid = sc->sc_pcidevid;
   14882 
   14883 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14884 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14885 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14886 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14887 		    (sc->sc_type >= WM_T_PCH_SPT))
   14888 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14889 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14890 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14891 
   14892 		if (sc->phy.acquire(sc) != 0)
   14893 			goto out;
   14894 
   14895 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14896 			uint16_t eee_advert;
   14897 
   14898 			rv = wm_read_emi_reg_locked(dev,
   14899 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14900 			if (rv)
   14901 				goto release;
   14902 
   14903 			/*
   14904 			 * Disable LPLU if both link partners support 100BaseT
   14905 			 * EEE and 100Full is advertised on both ends of the
   14906 			 * link, and enable Auto Enable LPI since there will
   14907 			 * be no driver to enable LPI while in Sx.
   14908 			 */
   14909 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14910 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14911 				uint16_t anar, phy_reg;
   14912 
   14913 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14914 				    &anar);
   14915 				if (anar & ANAR_TX_FD) {
   14916 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14917 					    PHY_CTRL_NOND0A_LPLU);
   14918 
   14919 					/* Set Auto Enable LPI after link up */
   14920 					sc->phy.readreg_locked(dev, 2,
   14921 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14922 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14923 					sc->phy.writereg_locked(dev, 2,
   14924 					    I217_LPI_GPIO_CTRL, phy_reg);
   14925 				}
   14926 			}
   14927 		}
   14928 
   14929 		/*
   14930 		 * For i217 Intel Rapid Start Technology support,
   14931 		 * when the system is going into Sx and no manageability engine
   14932 		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
   14934 		 * on power good, as well as the MTA (Multicast table array).
   14935 		 * The SMBus release must also be disabled on LCD reset.
   14936 		 */
   14937 
		/*
		 * Enable MTA to reset for Intel Rapid Start Technology
		 * support (XXX not yet implemented).
		 */
   14942 
   14943 release:
   14944 		sc->phy.release(sc);
   14945 	}
   14946 out:
   14947 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14948 
   14949 	if (sc->sc_type == WM_T_ICH8)
   14950 		wm_gig_downshift_workaround_ich8lan(sc);
   14951 
   14952 	if (sc->sc_type >= WM_T_PCH) {
   14953 		wm_oem_bits_config_ich8lan(sc, false);
   14954 
   14955 		/* Reset PHY to activate OEM bits on 82577/8 */
   14956 		if (sc->sc_type == WM_T_PCH)
   14957 			wm_reset_phy(sc);
   14958 
   14959 		if (sc->phy.acquire(sc) != 0)
   14960 			return;
   14961 		wm_write_smbus_addr(sc);
   14962 		sc->phy.release(sc);
   14963 	}
   14964 }
   14965 
   14966 /*
   14967  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14968  *  @sc: pointer to the HW structure
   14969  *
   14970  *  During Sx to S0 transitions on non-managed devices or managed devices
   14971  *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   14973  *  the PHY.
   14974  *  On i217, setup Intel Rapid Start Technology.
   14975  */
   14976 static int
   14977 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14978 {
   14979 	device_t dev = sc->sc_dev;
   14980 	int rv;
   14981 
   14982 	if (sc->sc_type < WM_T_PCH2)
   14983 		return 0;
   14984 
   14985 	rv = wm_init_phy_workarounds_pchlan(sc);
   14986 	if (rv != 0)
   14987 		return -1;
   14988 
	/*
	 * For i217 Intel Rapid Start Technology support, when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
   14994 	if (sc->sc_phytype == WMPHY_I217) {
   14995 		uint16_t phy_reg;
   14996 
   14997 		if (sc->phy.acquire(sc) != 0)
   14998 			return -1;
   14999 
   15000 		/* Clear Auto Enable LPI after link up */
   15001 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15002 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15003 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15004 
   15005 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			/*
			 * Restore clear on SMB if no manageability engine
			 * is present.
			 */
   15009 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15010 			    &phy_reg);
   15011 			if (rv != 0)
   15012 				goto release;
   15013 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15014 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15015 
   15016 			/* Disable Proxy */
   15017 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15018 		}
   15019 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15021 		if (rv != 0)
   15022 			goto release;
   15023 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15024 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15025 
   15026 release:
   15027 		sc->phy.release(sc);
   15028 		return rv;
   15029 	}
   15030 
   15031 	return 0;
   15032 }
   15033 
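/*
 * Arm the hardware for wakeup (magic packet by default), apply the
 * suspend-time workarounds, and set or clear PME in the PCI power
 * management capability accordingly.
 */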
   15034 static void
   15035 wm_enable_wakeup(struct wm_softc *sc)
   15036 {
   15037 	uint32_t reg, pmreg;
   15038 	pcireg_t pmode;
   15039 	int rv = 0;
   15040 
   15041 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15042 		device_xname(sc->sc_dev), __func__));
   15043 
   15044 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15045 	    &pmreg, NULL) == 0)
   15046 		return;
   15047 
   15048 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15049 		goto pme;
   15050 
   15051 	/* Advertise the wakeup capability */
   15052 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15053 	    | CTRL_SWDPIN(3));
   15054 
   15055 	/* Keep the laser running on fiber adapters */
   15056 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15057 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15058 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15059 		reg |= CTRL_EXT_SWDPIN(3);
   15060 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15061 	}
   15062 
   15063 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15064 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15065 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15066 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15067 		wm_suspend_workarounds_ich8lan(sc);
   15068 
   15069 #if 0	/* For the multicast packet */
   15070 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15071 	reg |= WUFC_MC;
   15072 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15073 #endif
   15074 
   15075 	if (sc->sc_type >= WM_T_PCH) {
   15076 		rv = wm_enable_phy_wakeup(sc);
   15077 		if (rv != 0)
   15078 			goto pme;
   15079 	} else {
   15080 		/* Enable wakeup by the MAC */
   15081 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15082 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15083 	}
   15084 
   15085 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15086 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15087 		|| (sc->sc_type == WM_T_PCH2))
   15088 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15089 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15090 
   15091 pme:
   15092 	/* Request PME */
   15093 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15094 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15095 		/* For WOL */
   15096 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15097 	} else {
   15098 		/* Disable WOL */
   15099 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15100 	}
   15101 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15102 }
   15103 
   15104 /* Disable ASPM L0s and/or L1 for workaround */
   15105 static void
   15106 wm_disable_aspm(struct wm_softc *sc)
   15107 {
   15108 	pcireg_t reg, mask = 0;
	const char *str = "";
   15110 
	/*
	 * Only for PCIe devices which have the PCIe capability in their
	 * PCI config space.
	 */
   15115 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15116 		return;
   15117 
   15118 	switch (sc->sc_type) {
   15119 	case WM_T_82571:
   15120 	case WM_T_82572:
   15121 		/*
   15122 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15123 		 * State Power management L1 State (ASPM L1).
   15124 		 */
   15125 		mask = PCIE_LCSR_ASPM_L1;
   15126 		str = "L1 is";
   15127 		break;
   15128 	case WM_T_82573:
   15129 	case WM_T_82574:
   15130 	case WM_T_82583:
		/*
		 * The 82573 disappears when PCIe ASPM L0s is enabled.
		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The documents for the 82574 and 82583 say
		 * that disabling L0s only on those specific chipsets would
		 * be sufficient, but we follow what the Intel em driver
		 * does.
		 *
		 * References:
		 * Errata 8 of the Specification Update of i82573.
		 * Errata 20 of the Specification Update of i82574.
		 * Errata 9 of the Specification Update of i82583.
		 */
   15144 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15145 		str = "L0s and L1 are";
   15146 		break;
   15147 	default:
   15148 		return;
   15149 	}
   15150 
   15151 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15152 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15153 	reg &= ~mask;
   15154 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15155 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15156 
   15157 	/* Print only in wm_attach() */
   15158 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15159 		aprint_verbose_dev(sc->sc_dev,
		    "ASPM %s disabled to work around the errata.\n", str);
   15161 }
   15162 
   15163 /* LPLU */
   15164 
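/*
 * Disable D0 Low Power Link Up (LPLU); which register holds the
 * setting depends on the chip generation.
 */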
   15165 static void
   15166 wm_lplu_d0_disable(struct wm_softc *sc)
   15167 {
   15168 	struct mii_data *mii = &sc->sc_mii;
   15169 	uint32_t reg;
   15170 	uint16_t phyval;
   15171 
   15172 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15173 		device_xname(sc->sc_dev), __func__));
   15174 
   15175 	if (sc->sc_phytype == WMPHY_IFE)
   15176 		return;
   15177 
   15178 	switch (sc->sc_type) {
   15179 	case WM_T_82571:
   15180 	case WM_T_82572:
   15181 	case WM_T_82573:
   15182 	case WM_T_82575:
   15183 	case WM_T_82576:
   15184 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15185 		phyval &= ~PMR_D0_LPLU;
   15186 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15187 		break;
   15188 	case WM_T_82580:
   15189 	case WM_T_I350:
   15190 	case WM_T_I210:
   15191 	case WM_T_I211:
   15192 		reg = CSR_READ(sc, WMREG_PHPM);
   15193 		reg &= ~PHPM_D0A_LPLU;
   15194 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15195 		break;
   15196 	case WM_T_82574:
   15197 	case WM_T_82583:
   15198 	case WM_T_ICH8:
   15199 	case WM_T_ICH9:
   15200 	case WM_T_ICH10:
   15201 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15202 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15203 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15204 		CSR_WRITE_FLUSH(sc);
   15205 		break;
   15206 	case WM_T_PCH:
   15207 	case WM_T_PCH2:
   15208 	case WM_T_PCH_LPT:
   15209 	case WM_T_PCH_SPT:
   15210 	case WM_T_PCH_CNP:
   15211 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15212 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15213 		if (wm_phy_resetisblocked(sc) == false)
   15214 			phyval |= HV_OEM_BITS_ANEGNOW;
   15215 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15216 		break;
   15217 	default:
   15218 		break;
   15219 	}
   15220 }
   15221 
   15222 /* EEE */
   15223 
   15224 static int
   15225 wm_set_eee_i350(struct wm_softc *sc)
   15226 {
   15227 	struct ethercom *ec = &sc->sc_ethercom;
   15228 	uint32_t ipcnfg, eeer;
   15229 	uint32_t ipcnfg_mask
   15230 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15231 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15232 
   15233 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15234 
   15235 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15236 	eeer = CSR_READ(sc, WMREG_EEER);
   15237 
   15238 	/* Enable or disable per user setting */
   15239 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15240 		ipcnfg |= ipcnfg_mask;
   15241 		eeer |= eeer_mask;
   15242 	} else {
   15243 		ipcnfg &= ~ipcnfg_mask;
   15244 		eeer &= ~eeer_mask;
   15245 	}
   15246 
   15247 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15248 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15249 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15250 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15251 
   15252 	return 0;
   15253 }
   15254 
   15255 static int
   15256 wm_set_eee_pchlan(struct wm_softc *sc)
   15257 {
   15258 	device_t dev = sc->sc_dev;
   15259 	struct ethercom *ec = &sc->sc_ethercom;
   15260 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15261 	int rv = 0;
   15262 
   15263 	switch (sc->sc_phytype) {
   15264 	case WMPHY_82579:
   15265 		lpa = I82579_EEE_LP_ABILITY;
   15266 		pcs_status = I82579_EEE_PCS_STATUS;
   15267 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15268 		break;
   15269 	case WMPHY_I217:
   15270 		lpa = I217_EEE_LP_ABILITY;
   15271 		pcs_status = I217_EEE_PCS_STATUS;
   15272 		adv_addr = I217_EEE_ADVERTISEMENT;
   15273 		break;
   15274 	default:
   15275 		return 0;
   15276 	}
   15277 
   15278 	if (sc->phy.acquire(sc)) {
   15279 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15280 		return 0;
   15281 	}
   15282 
   15283 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15284 	if (rv != 0)
   15285 		goto release;
   15286 
   15287 	/* Clear bits that enable EEE in various speeds */
   15288 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15289 
   15290 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15291 		/* Save off link partner's EEE ability */
   15292 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15293 		if (rv != 0)
   15294 			goto release;
   15295 
   15296 		/* Read EEE advertisement */
   15297 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15298 			goto release;
   15299 
   15300 		/*
   15301 		 * Enable EEE only for speeds in which the link partner is
   15302 		 * EEE capable and for which we advertise EEE.
   15303 		 */
   15304 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15305 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15306 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15307 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15308 			if ((data & ANLPAR_TX_FD) != 0)
   15309 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15310 			else {
   15311 				/*
   15312 				 * EEE is not supported in 100Half, so ignore
   15313 				 * partner's EEE in 100 ability if full-duplex
   15314 				 * is not advertised.
   15315 				 */
   15316 				sc->eee_lp_ability
   15317 				    &= ~AN_EEEADVERT_100_TX;
   15318 			}
   15319 		}
   15320 	}
   15321 
   15322 	if (sc->sc_phytype == WMPHY_82579) {
   15323 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15324 		if (rv != 0)
   15325 			goto release;
   15326 
   15327 		data &= ~I82579_LPI_PLL_SHUT_100;
   15328 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15329 	}
   15330 
   15331 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15332 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15333 		goto release;
   15334 
   15335 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15336 release:
   15337 	sc->phy.release(sc);
   15338 
   15339 	return rv;
   15340 }
   15341 
   15342 static int
   15343 wm_set_eee(struct wm_softc *sc)
   15344 {
   15345 	struct ethercom *ec = &sc->sc_ethercom;
   15346 
   15347 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15348 		return 0;
   15349 
   15350 	if (sc->sc_type == WM_T_I354) {
   15351 		/* I354 uses an external PHY */
   15352 		return 0; /* not yet */
   15353 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15354 		return wm_set_eee_i350(sc);
   15355 	else if (sc->sc_type >= WM_T_PCH2)
   15356 		return wm_set_eee_pchlan(sc);
   15357 
   15358 	return 0;
   15359 }
   15360 
   15361 /*
   15362  * Workarounds (mainly PHY related).
   15363  * Basically, PHY's workarounds are in the PHY drivers.
   15364  */
   15365 
   15366 /* Work-around for 82566 Kumeran PCS lock loss */
   15367 static int
   15368 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15369 {
   15370 	struct mii_data *mii = &sc->sc_mii;
   15371 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15372 	int i, reg, rv;
   15373 	uint16_t phyreg;
   15374 
   15375 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15376 		device_xname(sc->sc_dev), __func__));
   15377 
   15378 	/* If the link is not up, do nothing */
   15379 	if ((status & STATUS_LU) == 0)
   15380 		return 0;
   15381 
   15382 	/* Nothing to do if the link is other than 1Gbps */
   15383 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15384 		return 0;
   15385 
   15386 	for (i = 0; i < 10; i++) {
   15387 		/* read twice */
   15388 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15389 		if (rv != 0)
   15390 			return rv;
   15391 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15392 		if (rv != 0)
   15393 			return rv;
   15394 
   15395 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15396 			goto out;	/* GOOD! */
   15397 
   15398 		/* Reset the PHY */
   15399 		wm_reset_phy(sc);
   15400 		delay(5*1000);
   15401 	}
   15402 
   15403 	/* Disable GigE link negotiation */
   15404 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15405 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15406 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15407 
   15408 	/*
   15409 	 * Call gig speed drop workaround on Gig disable before accessing
   15410 	 * any PHY registers.
   15411 	 */
   15412 	wm_gig_downshift_workaround_ich8lan(sc);
   15413 
   15414 out:
   15415 	return 0;
   15416 }
   15417 
   15418 /*
   15419  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15420  *  @sc: pointer to the HW structure
   15421  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15423  *  LPLU, Gig disable, MDIC PHY reset):
   15424  *    1) Set Kumeran Near-end loopback
   15425  *    2) Clear Kumeran Near-end loopback
   15426  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15427  */
   15428 static void
   15429 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15430 {
   15431 	uint16_t kmreg;
   15432 
   15433 	/* Only for igp3 */
   15434 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15435 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15436 			return;
   15437 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15438 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15439 			return;
   15440 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15441 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15442 	}
   15443 }
   15444 
   15445 /*
   15446  * Workaround for pch's PHYs
   15447  * XXX should be moved to new PHY driver?
   15448  */
   15449 static int
   15450 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15451 {
   15452 	device_t dev = sc->sc_dev;
   15453 	struct mii_data *mii = &sc->sc_mii;
   15454 	struct mii_softc *child;
   15455 	uint16_t phy_data, phyrev = 0;
   15456 	int phytype = sc->sc_phytype;
   15457 	int rv;
   15458 
   15459 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15460 		device_xname(dev), __func__));
   15461 	KASSERT(sc->sc_type == WM_T_PCH);
   15462 
   15463 	/* Set MDIO slow mode before any other MDIO access */
   15464 	if (phytype == WMPHY_82577)
   15465 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15466 			return rv;
   15467 
   15468 	child = LIST_FIRST(&mii->mii_phys);
   15469 	if (child != NULL)
   15470 		phyrev = child->mii_mpd_rev;
   15471 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15473 	if ((child != NULL) &&
   15474 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15475 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15476 		/* Disable generation of early preamble (0x4431) */
   15477 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15478 		    &phy_data);
   15479 		if (rv != 0)
   15480 			return rv;
   15481 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15482 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15483 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15484 		    phy_data);
   15485 		if (rv != 0)
   15486 			return rv;
   15487 
   15488 		/* Preamble tuning for SSC */
   15489 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15490 		if (rv != 0)
   15491 			return rv;
   15492 	}
   15493 
   15494 	/* 82578 */
   15495 	if (phytype == WMPHY_82578) {
   15496 		/*
   15497 		 * Return registers to default by doing a soft reset then
   15498 		 * writing 0x3140 to the control register
   15499 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15500 		 */
   15501 		if ((child != NULL) && (phyrev < 2)) {
   15502 			PHY_RESET(child);
   15503 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15504 			if (rv != 0)
   15505 				return rv;
   15506 		}
   15507 	}
   15508 
   15509 	/* Select page 0 */
   15510 	if ((rv = sc->phy.acquire(sc)) != 0)
   15511 		return rv;
   15512 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15513 	sc->phy.release(sc);
   15514 	if (rv != 0)
   15515 		return rv;
   15516 
   15517 	/*
   15518 	 * Configure the K1 Si workaround during phy reset assuming there is
   15519 	 * link so that it disables K1 if link is in 1Gbps.
   15520 	 */
   15521 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15522 		return rv;
   15523 
   15524 	/* Workaround for link disconnects on a busy hub in half duplex */
   15525 	rv = sc->phy.acquire(sc);
   15526 	if (rv)
   15527 		return rv;
   15528 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15529 	if (rv)
   15530 		goto release;
   15531 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15532 	    phy_data & 0x00ff);
   15533 	if (rv)
   15534 		goto release;
   15535 
   15536 	/* Set MSE higher to enable link to stay up when noise is high */
   15537 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15538 release:
   15539 	sc->phy.release(sc);
   15540 
   15541 	return rv;
   15542 }
   15543 
   15544 /*
   15545  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15546  *  @sc:   pointer to the HW structure
   15547  */
   15548 static void
   15549 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15550 {
   15551 	device_t dev = sc->sc_dev;
   15552 	uint32_t mac_reg;
   15553 	uint16_t i, wuce;
   15554 	int count;
   15555 
   15556 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15557 		device_xname(sc->sc_dev), __func__));
   15558 
   15559 	if (sc->phy.acquire(sc) != 0)
   15560 		return;
   15561 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15562 		goto release;
   15563 
   15564 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15565 	count = wm_rar_count(sc);
   15566 	for (i = 0; i < count; i++) {
   15567 		uint16_t lo, hi;
   15568 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15569 		lo = (uint16_t)(mac_reg & 0xffff);
   15570 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15571 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15572 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15573 
   15574 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15575 		lo = (uint16_t)(mac_reg & 0xffff);
   15576 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15577 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15578 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15579 	}
   15580 
   15581 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15582 
   15583 release:
   15584 	sc->phy.release(sc);
   15585 }
   15586 
   15587 /*
   15588  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15589  *  done after every PHY reset.
   15590  */
   15591 static int
   15592 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15593 {
   15594 	device_t dev = sc->sc_dev;
   15595 	int rv;
   15596 
   15597 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15598 		device_xname(dev), __func__));
   15599 	KASSERT(sc->sc_type == WM_T_PCH2);
   15600 
   15601 	/* Set MDIO slow mode before any other MDIO access */
   15602 	rv = wm_set_mdio_slow_mode_hv(sc);
   15603 	if (rv != 0)
   15604 		return rv;
   15605 
   15606 	rv = sc->phy.acquire(sc);
   15607 	if (rv != 0)
   15608 		return rv;
   15609 	/* Set MSE higher to enable link to stay up when noise is high */
   15610 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15611 	if (rv != 0)
   15612 		goto release;
   15613 	/* Drop link after 5 times MSE threshold was reached */
   15614 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15615 release:
   15616 	sc->phy.release(sc);
   15617 
   15618 	return rv;
   15619 }
   15620 
/**
 *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @sc:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
   15631 static int
   15632 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15633 {
   15634 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15635 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15636 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15637 	uint16_t phyreg;
   15638 
   15639 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
   15643 		if (rv != 0)
   15644 			goto release;
   15645 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15646 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15647 		if (rv != 0)
   15648 			goto release;
   15649 		delay(20);
   15650 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15651 
   15652 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15653 		    &phyreg);
   15654 release:
   15655 		sc->phy.release(sc);
   15656 		return rv;
   15657 	}
   15658 
   15659 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15660 
   15661 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15662 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15663 	    || !link
   15664 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15665 		goto update_fextnvm6;
   15666 
   15667 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15668 
   15669 	/* Clear link status transmit timeout */
   15670 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15671 	if (speed == STATUS_SPEED_100) {
   15672 		/* Set inband Tx timeout to 5x10us for 100Half */
   15673 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15674 
   15675 		/* Do not extend the K1 entry latency for 100Half */
   15676 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15677 	} else {
   15678 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15679 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15680 
   15681 		/* Extend the K1 entry latency for 10 Mbps */
   15682 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15683 	}
   15684 
   15685 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15686 
   15687 update_fextnvm6:
   15688 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15689 	return 0;
   15690 }
   15691 
   15692 /*
   15693  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15694  *  @sc:   pointer to the HW structure
   15695  *  @link: link up bool flag
   15696  *
   15697  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   15698  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   15699  *  If link is down, the function will restore the default K1 setting located
   15700  *  in the NVM.
   15701  */
   15702 static int
   15703 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15704 {
   15705 	int k1_enable = sc->sc_nvm_k1_enabled;
   15706 
   15707 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15708 		device_xname(sc->sc_dev), __func__));
   15709 
   15710 	if (sc->phy.acquire(sc) != 0)
   15711 		return -1;
   15712 
   15713 	if (link) {
   15714 		k1_enable = 0;
   15715 
   15716 		/* Link stall fix for link up */
   15717 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15718 		    0x0100);
   15719 	} else {
   15720 		/* Link stall fix for link down */
   15721 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15722 		    0x4100);
   15723 	}
   15724 
   15725 	wm_configure_k1_ich8lan(sc, k1_enable);
   15726 	sc->phy.release(sc);
   15727 
   15728 	return 0;
   15729 }
   15730 
   15731 /*
   15732  *  wm_k1_workaround_lv - K1 Si workaround
   15733  *  @sc:   pointer to the HW structure
   15734  *
   15735  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   15736  *  Disable K1 for 1000 and 100 speeds
   15737  */
   15738 static int
   15739 wm_k1_workaround_lv(struct wm_softc *sc)
   15740 {
   15741 	uint32_t reg;
   15742 	uint16_t phyreg;
   15743 	int rv;
   15744 
   15745 	if (sc->sc_type != WM_T_PCH2)
   15746 		return 0;
   15747 
   15748 	/* Set K1 beacon duration based on 10Mbps speed */
   15749 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15750 	if (rv != 0)
   15751 		return rv;
   15752 
   15753 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15754 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15755 		if (phyreg &
   15756 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   15758 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15759 			    &phyreg);
   15760 			if (rv != 0)
   15761 				return rv;
   15762 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15763 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15764 			    phyreg);
   15765 			if (rv != 0)
   15766 				return rv;
   15767 		} else {
   15768 			/* For 10Mbps */
   15769 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15770 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15771 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15772 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15773 		}
   15774 	}
   15775 
   15776 	return 0;
   15777 }
   15778 
   15779 /*
   15780  *  wm_link_stall_workaround_hv - Si workaround
   15781  *  @sc: pointer to the HW structure
   15782  *
   15783  *  This function works around a Si bug where the link partner can get
   15784  *  a link up indication before the PHY does. If small packets are sent
   15785  *  by the link partner they can be placed in the packet buffer without
 *  being properly accounted for by the PHY and will stall, preventing
   15787  *  further packets from being received.  The workaround is to clear the
   15788  *  packet buffer after the PHY detects link up.
   15789  */
   15790 static int
   15791 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15792 {
   15793 	uint16_t phyreg;
   15794 
   15795 	if (sc->sc_phytype != WMPHY_82578)
   15796 		return 0;
   15797 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15799 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15800 	if ((phyreg & BMCR_LOOP) != 0)
   15801 		return 0;
   15802 
   15803 	/* Check if link is up and at 1Gbps */
   15804 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15805 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15806 	    | BM_CS_STATUS_SPEED_MASK;
   15807 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15808 		| BM_CS_STATUS_SPEED_1000))
   15809 		return 0;
   15810 
   15811 	delay(200 * 1000);	/* XXX too big */
   15812 
   15813 	/* Flush the packets in the fifo buffer */
   15814 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15815 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15816 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15817 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15818 
   15819 	return 0;
   15820 }
   15821 
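/* Put the PHY's MDIO interface into slow mode (via HV_KMRN_MODE_CTRL). */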
   15822 static int
   15823 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15824 {
   15825 	int rv;
   15826 	uint16_t reg;
   15827 
   15828 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15829 	if (rv != 0)
   15830 		return rv;
   15831 
   15832 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15833 	    reg | HV_KMRN_MDIO_SLOW);
   15834 }
   15835 
   15836 /*
   15837  *  wm_configure_k1_ich8lan - Configure K1 power state
   15838  *  @sc: pointer to the HW structure
   15839  *  @enable: K1 state to configure
   15840  *
   15841  *  Configure the K1 power state based on the provided parameter.
   15842  *  Assumes semaphore already acquired.
   15843  */
   15844 static void
   15845 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15846 {
   15847 	uint32_t ctrl, ctrl_ext, tmp;
   15848 	uint16_t kmreg;
   15849 	int rv;
   15850 
   15851 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15852 
   15853 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15854 	if (rv != 0)
   15855 		return;
   15856 
   15857 	if (k1_enable)
   15858 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15859 	else
   15860 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15861 
   15862 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15863 	if (rv != 0)
   15864 		return;
   15865 
   15866 	delay(20);
   15867 
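	/*
	 * Briefly force the speed (with CTRL_EXT_SPD_BYPS set) and then
	 * restore the original CTRL/CTRL_EXT values; this presumably lets
	 * the new K1 setting take effect.
	 */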
   15868 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15869 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15870 
   15871 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15872 	tmp |= CTRL_FRCSPD;
   15873 
   15874 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15875 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15876 	CSR_WRITE_FLUSH(sc);
   15877 	delay(20);
   15878 
   15879 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15880 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15881 	CSR_WRITE_FLUSH(sc);
   15882 	delay(20);
   15885 }
   15886 
   15887 /* special case - for 82575 - need to do manual init ... */
   15888 static void
   15889 wm_reset_init_script_82575(struct wm_softc *sc)
   15890 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM
	 * with the same setup as mentioned in the FreeBSD driver for the
	 * i82575.
	 */
   15895 
   15896 	/* SerDes configuration via SERDESCTRL */
   15897 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15898 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15899 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15900 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15901 
   15902 	/* CCM configuration via CCMCTL register */
   15903 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15904 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15905 
   15906 	/* PCIe lanes configuration */
   15907 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15908 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15909 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15910 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15911 
   15912 	/* PCIe PLL Configuration */
   15913 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15914 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15915 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15916 }
   15917 
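/*
 * Restore the MDICNFG destination and shared MDIO settings from the
 * NVM after a reset.  Only needed on 82580 in SGMII mode.
 */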
   15918 static void
   15919 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15920 {
   15921 	uint32_t reg;
   15922 	uint16_t nvmword;
   15923 	int rv;
   15924 
   15925 	if (sc->sc_type != WM_T_82580)
   15926 		return;
   15927 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15928 		return;
   15929 
   15930 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15931 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15932 	if (rv != 0) {
   15933 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15934 		    __func__);
   15935 		return;
   15936 	}
   15937 
   15938 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15939 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15940 		reg |= MDICNFG_DEST;
   15941 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15942 		reg |= MDICNFG_COM_MDIO;
   15943 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15944 }
   15945 
   15946 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15947 
   15948 static bool
   15949 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15950 {
   15951 	uint32_t reg;
   15952 	uint16_t id1, id2;
   15953 	int i, rv;
   15954 
   15955 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15956 		device_xname(sc->sc_dev), __func__));
   15957 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15958 
   15959 	id1 = id2 = 0xffff;
   15960 	for (i = 0; i < 2; i++) {
   15961 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15962 		    &id1);
   15963 		if ((rv != 0) || MII_INVALIDID(id1))
   15964 			continue;
   15965 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15966 		    &id2);
   15967 		if ((rv != 0) || MII_INVALIDID(id2))
   15968 			continue;
   15969 		break;
   15970 	}
   15971 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15972 		goto out;
   15973 
   15974 	/*
   15975 	 * In case the PHY needs to be in mdio slow mode,
   15976 	 * set slow mode and try to get the PHY id again.
   15977 	 */
   15978 	rv = 0;
   15979 	if (sc->sc_type < WM_T_PCH_LPT) {
   15980 		sc->phy.release(sc);
   15981 		wm_set_mdio_slow_mode_hv(sc);
   15982 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15983 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15984 		sc->phy.acquire(sc);
   15985 	}
   15986 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15987 		device_printf(sc->sc_dev, "XXX return with false\n");
   15988 		return false;
   15989 	}
   15990 out:
   15991 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15992 		/* Only unforce SMBus if ME is not active */
   15993 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15994 			uint16_t phyreg;
   15995 
   15996 			/* Unforce SMBus mode in PHY */
   15997 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15998 			    CV_SMB_CTRL, &phyreg);
   15999 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16000 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16001 			    CV_SMB_CTRL, phyreg);
   16002 
   16003 			/* Unforce SMBus mode in MAC */
   16004 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16005 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16006 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16007 		}
   16008 	}
   16009 	return true;
   16010 }
   16011 
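/*
 * Toggle the LANPHYPC pin value to power cycle the PHY (see also
 * wm_resume_workarounds_pchlan()); the PHY configuration counter is
 * shortened to 50msec beforehand.
 */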
   16012 static void
   16013 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16014 {
   16015 	uint32_t reg;
   16016 	int i;
   16017 
   16018 	/* Set PHY Config Counter to 50msec */
   16019 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16020 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16021 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16022 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16023 
   16024 	/* Toggle LANPHYPC */
   16025 	reg = CSR_READ(sc, WMREG_CTRL);
   16026 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16027 	reg &= ~CTRL_LANPHYPC_VALUE;
   16028 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16029 	CSR_WRITE_FLUSH(sc);
   16030 	delay(1000);
   16031 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16032 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16033 	CSR_WRITE_FLUSH(sc);
   16034 
   16035 	if (sc->sc_type < WM_T_PCH_LPT)
   16036 		delay(50 * 1000);
   16037 	else {
   16038 		i = 20;
   16039 
   16040 		do {
   16041 			delay(5 * 1000);
   16042 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16043 		    && i--);
   16044 
   16045 		delay(30 * 1000);
   16046 	}
   16047 }
   16048 
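/*
 * Program the PCIe LTR (Latency Tolerance Reporting) values and the
 * OBFF high water mark from the link speed and the Rx packet buffer
 * allocation.
 */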
   16049 static int
   16050 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16051 {
   16052 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16053 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16054 	uint32_t rxa;
   16055 	uint16_t scale = 0, lat_enc = 0;
   16056 	int32_t obff_hwm = 0;
   16057 	int64_t lat_ns, value;
   16058 
   16059 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16060 		device_xname(sc->sc_dev), __func__));
   16061 
   16062 	if (link) {
   16063 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16064 		uint32_t status;
   16065 		uint16_t speed;
   16066 		pcireg_t preg;
   16067 
   16068 		status = CSR_READ(sc, WMREG_STATUS);
   16069 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16070 		case STATUS_SPEED_10:
   16071 			speed = 10;
   16072 			break;
   16073 		case STATUS_SPEED_100:
   16074 			speed = 100;
   16075 			break;
   16076 		case STATUS_SPEED_1000:
   16077 			speed = 1000;
   16078 			break;
   16079 		default:
   16080 			device_printf(sc->sc_dev, "Unknown speed "
   16081 			    "(status = %08x)\n", status);
   16082 			return -1;
   16083 		}
   16084 
   16085 		/* Rx Packet Buffer Allocation size (KB) */
   16086 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16087 
   16088 		/*
   16089 		 * Determine the maximum latency tolerated by the device.
   16090 		 *
   16091 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16092 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16093 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16094 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16095 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16096 		 */
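		/*
		 * Worked example with illustrative values (not taken from
		 * any datasheet): rxa = 24KB, a 1500 byte MTU and a
		 * 1000Mbps link give
		 *   lat_ns = (24 * 1024 - 2 * (1500 + 14)) * 8 * 1000 / 1000
		 *          = 172384.
		 * Assuming LTRV_VALUE is the 10-bit maximum (1023), the
		 * loop below divides by 2^5 twice: value = 169, scale = 2
		 * (2^10ns units), i.e. a tolerance of about 173us.
		 */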
   16097 		lat_ns = ((int64_t)rxa * 1024 -
   16098 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16099 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16100 		if (lat_ns < 0)
   16101 			lat_ns = 0;
   16102 		else
   16103 			lat_ns /= speed;
   16104 		value = lat_ns;
   16105 
   16106 		while (value > LTRV_VALUE) {
			scale++;
   16108 			value = howmany(value, __BIT(5));
   16109 		}
   16110 		if (scale > LTRV_SCALE_MAX) {
   16111 			device_printf(sc->sc_dev,
   16112 			    "Invalid LTR latency scale %d\n", scale);
   16113 			return -1;
   16114 		}
   16115 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16116 
   16117 		/* Determine the maximum latency tolerated by the platform */
   16118 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16119 		    WM_PCI_LTR_CAP_LPT);
   16120 		max_snoop = preg & 0xffff;
   16121 		max_nosnoop = preg >> 16;
   16122 
   16123 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16124 
   16125 		if (lat_enc > max_ltr_enc) {
   16126 			lat_enc = max_ltr_enc;
   16127 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16128 			    * PCI_LTR_SCALETONS(
   16129 				    __SHIFTOUT(lat_enc,
   16130 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16131 		}
   16132 
   16133 		if (lat_ns) {
   16134 			lat_ns *= speed * 1000;
   16135 			lat_ns /= 8;
   16136 			lat_ns /= 1000000000;
   16137 			obff_hwm = (int32_t)(rxa - lat_ns);
   16138 		}
   16139 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16143 			return -1;
   16144 		}
   16145 	}
   16146 	/* Snoop and No-Snoop latencies the same */
   16147 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16148 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16149 
   16150 	/* Set OBFF high water mark */
   16151 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16152 	reg |= obff_hwm;
   16153 	CSR_WRITE(sc, WMREG_SVT, reg);
   16154 
   16155 	/* Enable OBFF */
   16156 	reg = CSR_READ(sc, WMREG_SVCR);
   16157 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16158 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16159 
   16160 	return 0;
   16161 }
   16162 
   16163 /*
   16164  * I210 Errata 25 and I211 Errata 10
   16165  * Slow System Clock.
   16166  */
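/*
 * While the PHY PLL reports itself unconfigured, reset the internal
 * PHY, bounce the device through D3/D0 with a temporary iNVM autoload
 * word, and retry up to WM_MAX_PLL_TRIES times.
 */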
   16167 static int
   16168 wm_pll_workaround_i210(struct wm_softc *sc)
   16169 {
   16170 	uint32_t mdicnfg, wuc;
   16171 	uint32_t reg;
   16172 	pcireg_t pcireg;
   16173 	uint32_t pmreg;
   16174 	uint16_t nvmword, tmp_nvmword;
   16175 	uint16_t phyval;
   16176 	bool wa_done = false;
   16177 	int i, rv = 0;
   16178 
   16179 	/* Get Power Management cap offset */
   16180 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16181 	    &pmreg, NULL) == 0)
   16182 		return -1;
   16183 
   16184 	/* Save WUC and MDICNFG registers */
   16185 	wuc = CSR_READ(sc, WMREG_WUC);
   16186 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16187 
   16188 	reg = mdicnfg & ~MDICNFG_DEST;
   16189 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16190 
   16191 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16192 		nvmword = INVM_DEFAULT_AL;
   16193 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16194 
   16195 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16196 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16197 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16198 
   16199 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16200 			rv = 0;
   16201 			break; /* OK */
   16202 		} else
   16203 			rv = -1;
   16204 
   16205 		wa_done = true;
   16206 		/* Directly reset the internal PHY */
   16207 		reg = CSR_READ(sc, WMREG_CTRL);
   16208 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16209 
   16210 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16211 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16212 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16213 
   16214 		CSR_WRITE(sc, WMREG_WUC, 0);
   16215 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16216 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16217 
   16218 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16219 		    pmreg + PCI_PMCSR);
   16220 		pcireg |= PCI_PMCSR_STATE_D3;
   16221 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16222 		    pmreg + PCI_PMCSR, pcireg);
   16223 		delay(1000);
   16224 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16225 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16226 		    pmreg + PCI_PMCSR, pcireg);
   16227 
   16228 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16229 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16230 
   16231 		/* Restore WUC register */
   16232 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16233 	}
   16234 
   16235 	/* Restore MDICNFG setting */
   16236 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16237 	if (wa_done)
   16238 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16239 	return rv;
   16240 }
   16241 
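/*
 * Quirk for legacy (INTx) interrupts on PCH_SPT/PCH_CNP: ungate the
 * side clock and disable IOSF SB clock gating and clock requests.
 */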
   16242 static void
   16243 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16244 {
   16245 	uint32_t reg;
   16246 
   16247 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16248 		device_xname(sc->sc_dev), __func__));
   16249 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16250 	    || (sc->sc_type == WM_T_PCH_CNP));
   16251 
   16252 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16253 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16254 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16255 
   16256 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16257 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16258 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16259 }
   16260